Example #1
0
def post_process_finding_save(finding,
                              dedupe_option=True,
                              false_history=False,
                              rules_option=True,
                              product_grading_option=True,
                              issue_updater_option=True,
                              push_to_jira=False,
                              user=None,
                              *args,
                              **kwargs):
    """Run the post-save pipeline for a finding.

    Status-changing steps (deduplication, false positive history) run first
    and sequentially to avoid race conditions; non-status-changing steps
    (tool issue updater, product grading) and the optional JIRA push follow.
    Each step is gated by its boolean flag and by the system settings.
    """
    system_settings = System_Settings.objects.get()

    # STEP 1 run all status changing tasks sequentially to avoid race conditions
    if dedupe_option:
        # Guard-style chain: skip-and-log cases first, real work last.
        if finding.hash_code is None:
            deduplicationLogger.warning(
                "skipping dedupe because hash_code is None")
        elif not system_settings.enable_deduplication:
            deduplicationLogger.debug(
                "skipping dedupe because it's disabled in system settings")
        else:
            from dojo.utils import do_dedupe_finding
            do_dedupe_finding(finding, *args, **kwargs)

    if false_history:
        if not system_settings.false_positive_history:
            deduplicationLogger.debug(
                "skipping false positive history because it's disabled in system settings"
            )
        else:
            from dojo.utils import do_false_positive_history
            do_false_positive_history(finding, *args, **kwargs)

    # STEP 2 run all non-status changing tasks as celery tasks in the background
    if issue_updater_option:
        from dojo.tools import tool_issue_updater
        tool_issue_updater.async_tool_issue_update(finding)

    if product_grading_option:
        if not system_settings.enable_product_grade:
            deduplicationLogger.debug(
                "skipping product grading because it's disabled in system settings"
            )
        else:
            from dojo.utils import calculate_grade
            calculate_grade(finding.test.engagement.product)

    # Adding a snippet here for push to JIRA so that it's in one place
    if push_to_jira:
        logger.debug('pushing finding %s to jira from finding.save()',
                     finding.pk)
        import dojo.jira_link.helper as jira_helper
        jira_helper.push_to_jira(finding)
Example #2
0
def post_process_finding_save(finding, dedupe_option=True, false_history=False, rules_option=True, product_grading_option=True,
             issue_updater_option=True, push_to_jira=False, user=None, *args, **kwargs):
    """Run the post-save pipeline for a finding.

    Status-changing steps (deduplication, false positive history) run first
    and sequentially to avoid race conditions; non-status-changing steps
    (tool issue updater, product grading) and the optional JIRA push follow.
    Each step is gated by its boolean flag and by the system settings.
    """
    system_settings = System_Settings.objects.get()

    # STEP 1 run all status changing tasks sequentially to avoid race conditions
    if dedupe_option:
        if finding.hash_code is not None:
            if system_settings.enable_deduplication:
                from dojo.utils import do_dedupe_finding
                do_dedupe_finding(finding, *args, **kwargs)
            else:
                deduplicationLogger.debug("skipping dedupe because it's disabled in system settings")
        else:
            deduplicationLogger.warning("skipping dedupe because hash_code is None")

    if false_history:
        if system_settings.false_positive_history:
            from dojo.utils import do_false_positive_history
            do_false_positive_history(finding, *args, **kwargs)
        else:
            deduplicationLogger.debug("skipping false positive history because it's disabled in system settings")

    # STEP 2 run all non-status changing tasks as celery tasks in the background
    if issue_updater_option:
        from dojo.tools import tool_issue_updater
        tool_issue_updater.async_tool_issue_update(finding)

    if product_grading_option:
        if system_settings.enable_product_grade:
            from dojo.utils import calculate_grade
            calculate_grade(finding.test.engagement.product)
        else:
            deduplicationLogger.debug("skipping product grading because it's disabled in system settings")

    # Adding a snippet here for push to JIRA so that it's in one place
    if push_to_jira:
        logger.debug('pushing finding %s to jira from finding.save()', finding.pk)
        import dojo.jira_link.helper as jira_helper

        # current approach is that whenever a finding is in a group, the group will be pushed to JIRA
        # based on feedback we could introduce another push_group_to_jira boolean everywhere
        # but what about the push_all boolean? Let's see how this works for now and get some feedback.
        if finding.has_jira_issue or not finding.finding_group:
            jira_helper.push_to_jira(finding)
        else:
            # If we get here the finding has no JIRA issue of its own AND is in
            # a group (the first branch already covered the no-group case, so
            # the original `elif finding.finding_group:` was always true here);
            # push the whole group instead of the single finding.
            jira_helper.push_to_jira(finding.finding_group)
Example #3
0
    def handle(self, *args, **options):
        """Recompute finding hash_codes and/or re-run deduplication.

        Options (from the management command's argument parser):
            parser: optional list of test type names; when given, only
                findings from those parsers are processed.
            hash_code_only: recompute hash_codes only, skip deduplication.
            dedupe_only: deduplicate only, skip hash_code recomputation.
            dedupe_sync: run deduplication in the foreground instead of
                submitting celery tasks.
        """
        restrict_to_parsers = options['parser']
        hash_code_only = options['hash_code_only']
        dedupe_only = options['dedupe_only']
        dedupe_sync = options['dedupe_sync']

        if restrict_to_parsers is not None:
            findings = Finding.objects.filter(
                test__test_type__name__in=restrict_to_parsers)
            # Pass the parser list as a single %s argument. The original code
            # unpacked it with '*', which supplies too many arguments for the
            # one placeholder whenever more than one parser is given and makes
            # logging report a formatting error instead of the message.
            logger.info(
                "######## Will process only parsers %s and %d findings ########",
                restrict_to_parsers, findings.count())
        else:
            # add filter on id to make counts not slow on mysql
            findings = Finding.objects.all().filter(id__gt=0)
            logger.info(
                "######## Will process the full database with %d findings ########",
                findings.count())

        # Phase 1: update hash_codes without deduplicating
        if not dedupe_only:
            logger.info(
                "######## Start Updating Hashcodes (foreground) ########")

            # only prefetch here for hash_code calculation
            finds = findings.prefetch_related('endpoints', 'test__test_type')
            mass_model_updater(Finding,
                               finds,
                               lambda f: generate_hash_code(f),
                               fields=['hash_code'],
                               order='asc',
                               log_prefix='hash_code computation ')

            logger.info("######## Done Updating Hashcodes########")

        # Phase 2: deduplicate synchronously
        if not hash_code_only:
            if get_system_setting('enable_deduplication'):
                logger.info("######## Start deduplicating (%s) ########",
                            ('foreground' if dedupe_sync else 'background'))
                if dedupe_sync:
                    mass_model_updater(Finding,
                                       findings,
                                       lambda f: do_dedupe_finding(f),
                                       fields=None,
                                       order='desc',
                                       page_size=100,
                                       log_prefix='deduplicating ')
                else:
                    # async tasks only need the id
                    mass_model_updater(Finding,
                                       findings.only('id'),
                                       lambda f: do_dedupe_finding_task(f.id),
                                       fields=None,
                                       order='desc',
                                       log_prefix='deduplicating ')

                # update the grading for every product; calculate_grade is
                # presumably a no-op when grading is disabled — TODO confirm
                logger.debug('Updating grades for products...')
                for product in Product.objects.all():
                    calculate_grade(product)

                logger.info("######## Done deduplicating (%s) ########",
                            ('foreground'
                             if dedupe_sync else 'tasks submitted to celery'))
            else:
                logger.debug(
                    "skipping dedupe because it's disabled in system settings")