def add_alerts(self, runinterval):
    """Emit periodic engagement alerts and refresh product grades.

    Called once per scheduler tick; ``runinterval`` is the timedelta
    between ticks and is used to window each query so an engagement is
    only alerted on the tick in which it enters the window.
    """
    current_time = timezone.now()

    # Engagements starting three days from now, within one scheduler tick.
    horizon = current_time + timedelta(days=3)
    for upcoming in Engagement.objects.filter(
            target_start__gt=horizon,
            target_start__lt=horizon + runinterval).order_by('target_start'):
        create_notification(
            event='upcoming_engagement',
            title='Upcoming engagement: %s' % upcoming.name,
            engagement=upcoming,
            recipients=[upcoming.lead],
            url=reverse('view_engagement', args=(upcoming.id, )))

    # In-progress engagements that started within the last tick but are
    # already past their target end date.
    for stale in Engagement.objects.filter(
            target_start__gt=current_time - runinterval,
            target_end__lt=current_time,
            status='In Progress').order_by('-target_end'):
        create_notification(
            event='stale_engagement',
            title='Stale Engagement: %s' % stale.name,
            description='The engagement "%s" is stale. Target end was %s.' % (stale.name, stale.target_end.strftime("%b. %d, %Y")),
            url=reverse('view_engagement', args=(stale.id, )),
            recipients=[stale.lead])

    system_settings = System_Settings.objects.get()
    if system_settings.engagement_auto_close:
        # Close Engagements older than user defined days
        close_days = system_settings.engagement_auto_close_days
        overdue = Engagement.objects.filter(
            target_end__lte=current_time - timedelta(days=close_days),
            status='In Progress').order_by('target_end')
        for eng in overdue:
            create_notification(
                event='auto_close_engagement',
                title=eng.name,
                description='The engagement "%s" has auto-closed. Target end was %s.' % (eng.name, eng.target_end.strftime("%b. %d, %Y")),
                url=reverse('view_engagement', args=(eng.id, )),
                recipients=[eng.lead])
        # Queryset was evaluated by the loop above; this issues one bulk UPDATE.
        overdue.update(status="Completed", active=False, updated=timezone.now())

    # Calculate grade
    if system_settings.enable_product_grade:
        for product in Product.objects.all():
            calculate_grade(product)
def post_process_finding_save(finding, dedupe_option=True, false_history=False, rules_option=True,
                              product_grading_option=True, issue_updater_option=True, push_to_jira=False,
                              user=None, *args, **kwargs):
    """Run the follow-up work that must happen after a finding is saved.

    Status-changing steps (dedupe, false-positive history) run inline and
    in order to avoid race conditions; the remaining steps are dispatched
    as background tasks. Each step is gated by its option flag and by the
    corresponding system setting.
    """
    sys_settings = System_Settings.objects.get()

    # STEP 1 run all status changing tasks sequentially to avoid race conditions
    if dedupe_option:
        if finding.hash_code is None:
            deduplicationLogger.warning(
                "skipping dedupe because hash_code is None")
        elif not sys_settings.enable_deduplication:
            deduplicationLogger.debug(
                "skipping dedupe because it's disabled in system settings")
        else:
            from dojo.utils import do_dedupe_finding
            do_dedupe_finding(finding, *args, **kwargs)

    if false_history:
        if sys_settings.false_positive_history:
            from dojo.utils import do_false_positive_history
            do_false_positive_history(finding, *args, **kwargs)
        else:
            deduplicationLogger.debug(
                "skipping false positive history because it's disabled in system settings")

    # STEP 2 run all non-status changing tasks as celery tasks in the background
    if issue_updater_option:
        from dojo.tools import tool_issue_updater
        tool_issue_updater.async_tool_issue_update(finding)

    if product_grading_option:
        if sys_settings.enable_product_grade:
            from dojo.utils import calculate_grade
            calculate_grade(finding.test.engagement.product)
        else:
            deduplicationLogger.debug(
                "skipping product grading because it's disabled in system settings")

    # Adding a snippet here for push to JIRA so that it's in one place
    if push_to_jira:
        logger.debug('pushing finding %s to jira from finding.save()', finding.pk)
        import dojo.jira_link.helper as jira_helper
        jira_helper.push_to_jira(finding)
def finding_bulk_update(request, tid):
    """Bulk-delete or bulk-edit the selected findings of a test.

    POST parameters: ``finding_to_update`` (list of finding ids),
    ``delete_bulk_findings`` (flag selecting delete vs. edit), plus the
    FindingBulkUpdateForm fields. Always redirects back to the test view.
    """
    test = get_object_or_404(Test, id=tid)
    form = FindingBulkUpdateForm(request.POST)
    if request.method == "POST":
        finding_to_update = request.POST.getlist('finding_to_update')
        if request.POST.get('delete_bulk_findings') and finding_to_update:
            finds = Finding.objects.filter(test=test, id__in=finding_to_update)
            product = Product.objects.get(engagement__test=test)
            finds.delete()
            # Deletion bypasses Finding.save(), so re-grade the product explicitly.
            calculate_grade(product)
        else:
            if form.is_valid() and finding_to_update:
                finding_to_update = request.POST.getlist('finding_to_update')
                finds = Finding.objects.filter(test=test, id__in=finding_to_update)
                if form.cleaned_data['severity']:
                    finds.update(severity=form.cleaned_data['severity'],
                                 numerical_severity=Finding.get_numerical_severity(form.cleaned_data['severity']),
                                 last_reviewed=timezone.now(),
                                 last_reviewed_by=request.user)
                if form.cleaned_data['status']:
                    finds.update(active=form.cleaned_data['active'],
                                 verified=form.cleaned_data['verified'],
                                 false_p=form.cleaned_data['false_p'],
                                 out_of_scope=form.cleaned_data['out_of_scope'],
                                 last_reviewed=timezone.now(),
                                 last_reviewed_by=request.user)
                # Update the grade as bulk edits don't go through save
                if form.cleaned_data['severity'] or form.cleaned_data['status']:
                    calculate_grade(test.engagement.product)
                # NOTE(review): the missing-JIRA-config alert below is logged for
                # every selected finding even when push_to_jira was not requested —
                # confirm this is intended.
                for finding in finds:
                    if JIRA_PKey.objects.filter(product=finding.test.engagement.product).count() == 0:
                        log_jira_alert('Finding cannot be pushed to jira as there is no jira configuration for this product.', finding)
                    else:
                        old_status = finding.status()
                        if form.cleaned_data['push_to_jira']:
                            # Update the existing JIRA issue if one is linked,
                            # otherwise create a new one (both async via celery).
                            if JIRA_Issue.objects.filter(finding=finding).exists():
                                update_issue_task.delay(finding, old_status, True)
                            else:
                                add_issue_task.delay(finding, True)
                messages.add_message(request,
                                     messages.SUCCESS,
                                     'Bulk edit of findings was successful. Check to make sure it is what you intended.',
                                     extra_tags='alert-success')
            else:
                messages.add_message(request,
                                     messages.ERROR,
                                     'Unable to process bulk update. Required fields were not selected.',
                                     extra_tags='alert-danger')
    return HttpResponseRedirect(reverse('view_test', args=(test.id,)))
def finding_bulk_update(request, tid):
    """Bulk-delete or bulk-edit the selected findings of a test.

    POST parameters: ``finding_to_update`` (list of finding ids),
    ``delete_bulk_findings`` (flag selecting delete vs. edit), plus the
    FindingBulkUpdateForm fields. Always redirects back to the test view.
    """
    test = get_object_or_404(Test, id=tid)
    form = FindingBulkUpdateForm(request.POST)
    if request.method == "POST":
        selected_ids = request.POST.getlist('finding_to_update')
        if request.POST.get('delete_bulk_findings') and selected_ids:
            chosen = Finding.objects.filter(test=test, id__in=selected_ids)
            product = Product.objects.get(engagement__test=test)
            chosen.delete()
            # Deletion bypasses Finding.save(), so re-grade explicitly.
            calculate_grade(product)
        elif form.is_valid() and selected_ids:
            chosen = Finding.objects.filter(test=test, id__in=selected_ids)
            new_severity = form.cleaned_data['severity']
            if new_severity:
                chosen.update(
                    severity=new_severity,
                    numerical_severity=Finding.get_numerical_severity(new_severity),
                    last_reviewed=timezone.now(),
                    last_reviewed_by=request.user)
            if form.cleaned_data['status']:
                chosen.update(
                    active=form.cleaned_data['active'],
                    verified=form.cleaned_data['verified'],
                    false_p=form.cleaned_data['false_p'],
                    out_of_scope=form.cleaned_data['out_of_scope'],
                    last_reviewed=timezone.now(),
                    last_reviewed_by=request.user)
            # Update the grade as bulk edits don't go through save
            if new_severity or form.cleaned_data['status']:
                calculate_grade(test.engagement.product)
            messages.add_message(
                request,
                messages.SUCCESS,
                'Bulk edit of findings was successful. Check to make sure it is what you intended.',
                extra_tags='alert-success')
        else:
            messages.add_message(
                request,
                messages.ERROR,
                'Unable to process bulk update. Required fields were not selected.',
                extra_tags='alert-danger')
    return HttpResponseRedirect(reverse('view_test', args=(test.id, )))
def post_process_finding_save(finding, dedupe_option=True, false_history=False, rules_option=True,
                              product_grading_option=True, issue_updater_option=True, push_to_jira=False,
                              user=None, *args, **kwargs):
    """Run the follow-up work that must happen after a finding is saved.

    Status-changing steps (dedupe, false-positive history) run inline and in
    order to avoid race conditions; the remaining steps are dispatched as
    background tasks. Each step is gated by its option flag and by the
    corresponding system setting. When the finding belongs to a finding
    group without its own JIRA issue, the whole group is pushed instead.
    """
    system_settings = System_Settings.objects.get()

    # STEP 1 run all status changing tasks sequentially to avoid race conditions
    if dedupe_option:
        if finding.hash_code is not None:
            if system_settings.enable_deduplication:
                from dojo.utils import do_dedupe_finding
                do_dedupe_finding(finding, *args, **kwargs)
            else:
                deduplicationLogger.debug("skipping dedupe because it's disabled in system settings")
        else:
            deduplicationLogger.warning("skipping dedupe because hash_code is None")

    if false_history:
        if system_settings.false_positive_history:
            from dojo.utils import do_false_positive_history
            do_false_positive_history(finding, *args, **kwargs)
        else:
            deduplicationLogger.debug("skipping false positive history because it's disabled in system settings")

    # STEP 2 run all non-status changing tasks as celery tasks in the background
    if issue_updater_option:
        from dojo.tools import tool_issue_updater
        tool_issue_updater.async_tool_issue_update(finding)

    if product_grading_option:
        if system_settings.enable_product_grade:
            from dojo.utils import calculate_grade
            calculate_grade(finding.test.engagement.product)
        else:
            deduplicationLogger.debug("skipping product grading because it's disabled in system settings")

    # Adding a snippet here for push to JIRA so that it's in one place
    if push_to_jira:
        logger.debug('pushing finding %s to jira from finding.save()', finding.pk)
        import dojo.jira_link.helper as jira_helper
        # current approach is that whenever a finding is in a group, the group will be pushed to JIRA
        # based on feedback we could introduct another push_group_to_jira boolean everywhere
        # but what about the push_all boolean? Let's see how this works for now and get some feedback.
        if finding.has_jira_issue or not finding.finding_group:
            jira_helper.push_to_jira(finding)
        # NOTE(review): this elif condition is always true when reached (the
        # `not finding.finding_group` case was handled above) — a plain `else`
        # would be equivalent.
        elif finding.finding_group:
            jira_helper.push_to_jira(finding.finding_group)
def finding_bulk_update(request, tid):
    """Bulk-delete or bulk-edit the selected findings of a test.

    POST parameters: ``finding_to_update`` (list of finding ids),
    ``delete_bulk_findings`` (flag selecting delete vs. edit), plus the
    FindingBulkUpdateForm fields. Always redirects back to the test view.
    """
    test = get_object_or_404(Test, id=tid)
    form = FindingBulkUpdateForm(request.POST)
    if request.method == "POST":
        finding_to_update = request.POST.getlist('finding_to_update')
        if request.POST.get('delete_bulk_findings') and finding_to_update:
            finds = Finding.objects.filter(test=test, id__in=finding_to_update)
            product = Product.objects.get(engagement__test=test)
            finds.delete()
            # Deletion bypasses Finding.save(), so re-grade the product explicitly.
            calculate_grade(product)
        else:
            if form.is_valid() and finding_to_update:
                finding_to_update = request.POST.getlist('finding_to_update')
                finds = Finding.objects.filter(test=test, id__in=finding_to_update)
                if form.cleaned_data['severity']:
                    finds.update(severity=form.cleaned_data['severity'],
                                 numerical_severity=Finding.get_numerical_severity(form.cleaned_data['severity']),
                                 last_reviewed=timezone.now(),
                                 last_reviewed_by=request.user)
                if form.cleaned_data['status']:
                    finds.update(active=form.cleaned_data['active'],
                                 verified=form.cleaned_data['verified'],
                                 false_p=form.cleaned_data['false_p'],
                                 out_of_scope=form.cleaned_data['out_of_scope'],
                                 last_reviewed=timezone.now(),
                                 last_reviewed_by=request.user)
                # Update the grade as bulk edits don't go through save
                if form.cleaned_data['severity'] or form.cleaned_data['status']:
                    calculate_grade(test.engagement.product)
                messages.add_message(request,
                                     messages.SUCCESS,
                                     'Bulk edit of findings was successful. Check to make sure it is what you intended.',
                                     extra_tags='alert-success')
            else:
                messages.add_message(request,
                                     messages.ERROR,
                                     'Unable to process bulk update. Required fields were not selected.',
                                     extra_tags='alert-danger')
    return HttpResponseRedirect(reverse('view_test', args=(test.id,)))
def add_alerts(self, runinterval):
    """Emit periodic engagement alerts and refresh product grades.

    Called once per scheduler tick; ``runinterval`` is the timedelta
    between ticks and windows each query so an engagement is only
    alerted on the tick in which it enters the window.
    """
    now = timezone.now()

    # Engagements starting three days from now, within one scheduler tick.
    upcoming_engagements = Engagement.objects.filter(target_start__gt=now + timedelta(days=3), target_start__lt=now + timedelta(days=3) + runinterval).order_by('target_start')
    for engagement in upcoming_engagements:
        create_notification(event='upcoming_engagement',
                            title='Upcoming engagement: %s' % engagement.name,
                            engagement=engagement,
                            recipients=[engagement.lead],
                            url=reverse('view_engagement', args=(engagement.id,)))

    # In-progress engagements that started within the last tick but are
    # already past their target end date.
    stale_engagements = Engagement.objects.filter(
        target_start__gt=now - runinterval,
        target_end__lt=now,
        status='In Progress').order_by('-target_end')
    for eng in stale_engagements:
        create_notification(event='stale_engagement',
                            title='Stale Engagement: %s' % eng.name,
                            description='The engagement "%s" is stale. Target end was %s.' % (eng.name, eng.target_end.strftime("%b. %d, %Y")),
                            url=reverse('view_engagement', args=(eng.id,)),
                            recipients=[eng.lead])

    system_settings = System_Settings.objects.get()
    if system_settings.engagement_auto_close:
        # Close Engagements older than user defined days
        close_days = system_settings.engagement_auto_close_days
        unclosed_engagements = Engagement.objects.filter(target_end__lte=now - timedelta(days=close_days),
                                                         status='In Progress').order_by('target_end')
        for eng in unclosed_engagements:
            create_notification(event='auto_close_engagement',
                                title=eng.name,
                                description='The engagement "%s" has auto-closed. Target end was %s.' % (eng.name, eng.target_end.strftime("%b. %d, %Y")),
                                url=reverse('view_engagement', args=(eng.id,)),
                                recipients=[eng.lead])
        # Queryset was evaluated by the loop above; this issues one bulk UPDATE.
        unclosed_engagements.update(status="Completed", active=False, updated=timezone.now())

    # Calculate grade
    if system_settings.enable_product_grade:
        products = Product.objects.all()
        for product in products:
            calculate_grade(product)
def product_grade(product):
    """Map a product's numeric grade onto a letter grade.

    Returns 'A'..'F', or '' when grading is disabled, the product is
    missing, or no numeric grade is available. If the numeric grade is
    unset, a recalculation is kicked off; the letter is derived from the
    value read before that recalculation, so a fresh grade only shows up
    on a later call.
    """
    grade = ""
    system_settings = System_Settings.objects.get()
    if system_settings.enable_product_grade and product:
        score = product.prod_numeric_grade
        if score is None or score == "":
            from dojo.utils import calculate_grade
            calculate_grade(product)
        if score:
            # Thresholds come from system settings; each elif already implies
            # the score is below the previous threshold.
            if score >= system_settings.product_grade_a:
                grade = 'A'
            elif score >= system_settings.product_grade_b:
                grade = 'B'
            elif score >= system_settings.product_grade_c:
                grade = 'C'
            elif score >= system_settings.product_grade_d:
                grade = 'D'
            elif score <= system_settings.product_grade_f:
                grade = 'F'
    return grade
def product_grade(product):
    """Map a product's numeric grade onto a letter grade.

    Returns 'A'..'F', or '' when grading is disabled, the product is
    missing, or no numeric grade is available. If the numeric grade is
    unset, a recalculation is kicked off.

    Fixes: the empty-string check used ``is ""`` (identity comparison
    against a literal — implementation-dependent and a SyntaxWarning on
    CPython 3.8+); also guards against a None product like the other
    variant of this function does.
    """
    grade = ""
    system_settings = System_Settings.objects.get()
    if system_settings.enable_product_grade and product:
        prod_numeric_grade = product.prod_numeric_grade
        # FIX: compare with == / is None, never `is ""`.
        if prod_numeric_grade == "" or prod_numeric_grade is None:
            from dojo.utils import calculate_grade
            calculate_grade(product)
        # NOTE(review): the local value is not re-read after calculate_grade,
        # so a freshly computed grade only shows on the next call — confirm
        # this is intended before changing it.
        if prod_numeric_grade:
            if prod_numeric_grade >= system_settings.product_grade_a:
                grade = 'A'
            elif prod_numeric_grade >= system_settings.product_grade_b:
                grade = 'B'
            elif prod_numeric_grade >= system_settings.product_grade_c:
                grade = 'C'
            elif prod_numeric_grade >= system_settings.product_grade_d:
                grade = 'D'
            elif prod_numeric_grade <= system_settings.product_grade_f:
                grade = 'F'
    return grade
def handle(self, *args, **options):
    """Management-command entry point: recompute finding hash codes and
    (optionally) rerun deduplication.

    Options: ``parser`` restricts to findings from the named test types;
    ``hash_code_only`` skips dedupe; ``dedupe_only`` skips hash-code
    recomputation; ``dedupe_sync`` runs dedupe in the foreground instead
    of dispatching celery tasks.
    """
    restrict_to_parsers = options['parser']
    hash_code_only = options['hash_code_only']
    dedupe_only = options['dedupe_only']
    dedupe_sync = options['dedupe_sync']

    if restrict_to_parsers is not None:
        findings = Finding.objects.filter(test__test_type__name__in=restrict_to_parsers)
        logger.info("######## Will process only parsers %s and %d findings ########", *restrict_to_parsers, findings.count())
    else:
        # add filter on id to make counts not slow on mysql
        findings = Finding.objects.all().filter(id__gt=0)
        logger.info("######## Will process the full database with %d findings ########", findings.count())

    # Phase 1: update hash_codes without deduplicating
    if not dedupe_only:
        logger.info("######## Start Updating Hashcodes (foreground) ########")

        # only prefetch here for hash_code calculation
        finds = findings.prefetch_related('endpoints', 'test__test_type')
        mass_model_updater(Finding, finds, lambda f: generate_hash_code(f), fields=['hash_code'], order='asc', log_prefix='hash_code computation ')
        logger.info("######## Done Updating Hashcodes########")

    # Phase 2: deduplicate synchronously
    if not hash_code_only:
        if get_system_setting('enable_deduplication'):
            logger.info("######## Start deduplicating (%s) ########", ('foreground' if dedupe_sync else 'background'))
            if dedupe_sync:
                mass_model_updater(Finding, findings, lambda f: do_dedupe_finding(f), fields=None, order='desc', page_size=100, log_prefix='deduplicating ')
            else:
                # async tasks only need the id
                mass_model_updater(Finding, findings.only('id'), lambda f: do_dedupe_finding_task(f.id), fields=None, order='desc', log_prefix='deduplicating ')

            # update the grading (if enabled)
            logger.debug('Updating grades for products...')
            for product in Product.objects.all():
                calculate_grade(product)

            logger.info("######## Done deduplicating (%s) ########", ('foreground' if dedupe_sync else 'tasks submitted to celery'))
        else:
            logger.debug("skipping dedupe because it's disabled in system settings")
def finding_post_delete(sender, instance, **kwargs):
    """Signal handler: re-grade the owning product after a finding is deleted."""
    affected_product = instance.test.engagement.product
    calculate_grade(affected_product)
def endpoint_bulk_update_all(request, pid=None):
    """Bulk-delete or bulk-toggle mitigation on the selected endpoints.

    When ``pid`` is given, permission is checked against that product;
    otherwise only staff users may proceed. Endpoints the user is not
    authorized for are filtered out and reported as skipped. Always
    redirects back to the endpoint list.
    """
    if request.method == "POST":
        endpoints_to_update = request.POST.getlist('endpoints_to_update')
        finds = Endpoint.objects.filter(id__in=endpoints_to_update).order_by("endpoint_meta__product__id")
        # Count before authorization filtering so skipped items can be reported.
        total_endpoint_count = finds.count()
        if request.POST.get('delete_bulk_endpoints') and endpoints_to_update:
            if pid is None:
                if not request.user.is_staff:
                    raise PermissionDenied
            else:
                product = get_object_or_404(Product, id=pid)
                user_has_permission_or_403(request.user, product, Permissions.Endpoint_Delete)
            finds = get_authorized_endpoints(Permissions.Endpoint_Delete, finds, request.user)
            skipped_endpoint_count = total_endpoint_count - finds.count()
            deleted_endpoint_count = finds.count()
            # Capture the affected products before deleting, so their grades
            # can be recomputed afterwards.
            product_calc = list(Product.objects.filter(endpoint__id__in=endpoints_to_update).distinct())
            finds.delete()
            for prod in product_calc:
                calculate_grade(prod)
            if skipped_endpoint_count > 0:
                add_error_message_to_response('Skipped deletion of {} endpoints because you are not authorized.'.format(skipped_endpoint_count))
            if deleted_endpoint_count > 0:
                messages.add_message(request,
                                     messages.SUCCESS,
                                     'Bulk delete of {} endpoints was successful.'.format(deleted_endpoint_count),
                                     extra_tags='alert-success')
        else:
            if endpoints_to_update:
                if pid is None:
                    if not request.user.is_staff:
                        raise PermissionDenied
                else:
                    product = get_object_or_404(Product, id=pid)
                    # NOTE(review): the edit path checks Finding_Edit on the
                    # product but filters endpoints by Endpoint_Edit — confirm
                    # this mix is intended.
                    user_has_permission_or_403(request.user, product, Permissions.Finding_Edit)
                finds = get_authorized_endpoints(Permissions.Endpoint_Edit, finds, request.user)
                skipped_endpoint_count = total_endpoint_count - finds.count()
                updated_endpoint_count = finds.count()
                if skipped_endpoint_count > 0:
                    add_error_message_to_response('Skipped mitigation of {} endpoints because you are not authorized.'.format(skipped_endpoint_count))
                # Toggle (not set) the mitigated flag on each authorized endpoint.
                for endpoint in finds:
                    endpoint.mitigated = not endpoint.mitigated
                    endpoint.save()
                if updated_endpoint_count > 0:
                    messages.add_message(request,
                                         messages.SUCCESS,
                                         'Bulk mitigation of {} endpoints was successful.'.format(updated_endpoint_count),
                                         extra_tags='alert-success')
            else:
                messages.add_message(request,
                                     messages.ERROR,
                                     'Unable to process bulk update. Required fields were not selected.',
                                     extra_tags='alert-danger')
    return HttpResponseRedirect(reverse('endpoint', args=()))