Example #1
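This example appears to be drawn from DefectDojo's re-import flow: each parsed finding is matched against the findings already attached to the test, matches are reactivated or left unchanged, unmatched findings are created as new, and anything that was on the test before the upload but absent from the report is collected for mitigation.
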
    def process_parsed_findings(self,
                                test,
                                parsed_findings,
                                scan_type,
                                user,
                                active,
                                verified,
                                minimum_severity=None,
                                endpoints_to_add=None,
                                push_to_jira=None,
                                group_by=None,
                                now=timezone.now(),
                                service=None,
                                scan_date=None,
                                **kwargs):
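        # Note: the now=timezone.now() default above is evaluated once, at function
        # definition time, not on every call.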

        items = parsed_findings
        original_items = list(test.finding_set.all())
        new_items = []
        mitigated_count = 0
        finding_count = 0
        finding_added_count = 0
        reactivated_count = 0
        reactivated_items = []
        unchanged_count = 0
        unchanged_items = []

        logger.debug('starting reimport of %i items.',
                     len(items) if items else 0)
        from dojo.importers.reimporter.utils import (
            get_deduplication_algorithm_from_conf,
            match_new_finding_to_existing_finding, update_endpoint_status,
            reactivate_endpoint_status)
        deduplication_algorithm = get_deduplication_algorithm_from_conf(
            scan_type)

        i = 0
        logger.debug(
            'STEP 1: looping over findings from the reimported report and trying to match them to existing findings'
        )
        deduplicationLogger.debug(
            'Algorithm used for matching new findings to existing findings: %s',
            deduplication_algorithm)
        for item in items:
            # FIXME hack to remove when all parsers have unit tests for this attribute
            if item.severity.lower().startswith(
                    'info') and item.severity != 'Info':
                item.severity = 'Info'

            item.numerical_severity = Finding.get_numerical_severity(
                item.severity)

            if minimum_severity and (Finding.SEVERITIES[item.severity] >
                                     Finding.SEVERITIES[minimum_severity]):
                # finding's severity is below the configured threshold: ignoring the finding
                continue

            # existing findings may be from before we had component_name/version fields
            component_name = item.component_name if hasattr(
                item, 'component_name') else None
            component_version = item.component_version if hasattr(
                item, 'component_version') else None

            if not hasattr(item, 'test'):
                item.test = test

            item.service = service

            item.hash_code = item.compute_hash_code()
            deduplicationLogger.debug("item's hash_code: %s", item.hash_code)

            findings = match_new_finding_to_existing_finding(
                item, test, deduplication_algorithm, scan_type)

            deduplicationLogger.debug(
                'found %i findings matching with current new finding',
                len(findings))

            if findings:
                # existing finding found
                finding = findings[0]
                if finding.false_p or finding.out_of_scope or finding.risk_accepted:
                    logger.debug(
                        '%i: skipping existing finding (it is marked as false positive:%s and/or out of scope:%s or is a risk accepted:%s): %i:%s:%s:%s',
                        i, finding.false_p, finding.out_of_scope,
                        finding.risk_accepted, finding.id, finding,
                        finding.component_name, finding.component_version)
                elif finding.mitigated or finding.is_mitigated:
                    logger.debug('%i: reactivating: %i:%s:%s:%s', i,
                                 finding.id, finding, finding.component_name,
                                 finding.component_version)
                    finding.mitigated = None
                    finding.is_mitigated = False
                    finding.mitigated_by = None
                    finding.active = True
                    finding.verified = verified

                    # existing findings may be from before we had component_name/version fields
                    finding.component_name = finding.component_name if finding.component_name else component_name
                    finding.component_version = finding.component_version if finding.component_version else component_version

                    # don't dedupe before endpoints are added
                    finding.save(dedupe_option=False)
                    note = Notes(entry="Re-activated by %s re-upload." %
                                 scan_type,
                                 author=user)
                    note.save()

                    endpoint_statuses = finding.endpoint_status.all()

                    # Determine if this can be run async
                    if settings.ASYNC_FINDING_IMPORT:
                        chunk_list = importer_utils.chunk_list(
                            endpoint_statuses)
                        # If there is only one chunk, then do not bother with async
                        if len(chunk_list) < 2:
                            reactivate_endpoint_status(endpoint_statuses,
                                                       sync=True)
                        else:
                            logger.debug(
                                'IMPORT_SCAN: Split endpoints into %i chunks of %s',
                                len(chunk_list), chunk_list[0])
                            # First kick off all the workers
                            for endpoint_status_list in chunk_list:
                                reactivate_endpoint_status(
                                    endpoint_status_list, sync=False)
                    else:
                        reactivate_endpoint_status(endpoint_statuses,
                                                   sync=True)

                    finding.notes.add(note)
                    reactivated_items.append(finding)
                    reactivated_count += 1
                else:
                    # existing findings may be from before we had component_name/version fields
                    logger.debug('%i: updating existing finding: %i:%s:%s:%s',
                                 i, finding.id, finding,
                                 finding.component_name,
                                 finding.component_version)
                    if not finding.component_name or not finding.component_version:
                        finding.component_name = finding.component_name if finding.component_name else component_name
                        finding.component_version = finding.component_version if finding.component_version else component_version
                        finding.save(dedupe_option=False)

                    unchanged_items.append(finding)
                    unchanged_count += 1
                if finding.dynamic_finding:
                    logger.debug(
                        "Re-import found an existing dynamic finding for this new finding. Checking the status of endpoints"
                    )
                    update_endpoint_status(finding, item, user)
            else:
                # no existing finding found
                item.reporter = user
                item.last_reviewed = timezone.now()
                item.last_reviewed_by = user
                item.verified = verified
                item.active = active

                # if scan_date was provided, override value from parser
                if scan_date:
                    item.date = scan_date

                # Save it. Don't dedupe before endpoints are added.
                item.save(dedupe_option=False)
                logger.debug(
                    '%i: reimport created new finding as no existing finding match: %i:%s:%s:%s',
                    i, item.id, item, item.component_name,
                    item.component_version)

                # only new items get auto grouped to avoid confusion around already existing items that are already grouped
                if settings.FEATURE_FINDING_GROUPS and group_by:
                    finding_helper.add_finding_to_auto_group(item, group_by)

                finding_added_count += 1
                new_items.append(item)
                finding = item

                if hasattr(item, 'unsaved_req_resp'):
                    for req_resp in item.unsaved_req_resp:
                        burp_rr = BurpRawRequestResponse(
                            finding=finding,
                            burpRequestBase64=base64.b64encode(
                                req_resp["req"].encode("utf-8")),
                            burpResponseBase64=base64.b64encode(
                                req_resp["resp"].encode("utf-8")))
                        burp_rr.clean()
                        burp_rr.save()

                if item.unsaved_request and item.unsaved_response:
                    burp_rr = BurpRawRequestResponse(
                        finding=finding,
                        burpRequestBase64=base64.b64encode(
                            item.unsaved_request.encode()),
                        burpResponseBase64=base64.b64encode(
                            item.unsaved_response.encode()))
                    burp_rr.clean()
                    burp_rr.save()

            # for existing findings: make sure endpoints are present or created
            if finding:
                finding_count += 1
                if settings.ASYNC_FINDING_IMPORT:
                    importer_utils.chunk_endpoints_and_disperse(
                        finding, test, item.unsaved_endpoints)
                else:
                    importer_utils.add_endpoints_to_unsaved_finding(
                        finding, test, item.unsaved_endpoints, sync=True)

                if endpoints_to_add:
                    if settings.ASYNC_FINDING_IMPORT:
                        importer_utils.chunk_endpoints_and_disperse(
                            finding, test, endpoints_to_add)
                    else:
                        importer_utils.add_endpoints_to_unsaved_finding(
                            finding, test, endpoints_to_add, sync=True)

                if item.unsaved_tags:
                    finding.tags = item.unsaved_tags

                if item.unsaved_files:
                    for unsaved_file in item.unsaved_files:
                        data = base64.b64decode(unsaved_file.get('data'))
                        title = unsaved_file.get('title', '<No title>')
                        file_upload, file_upload_created = FileUpload.objects.get_or_create(
                            title=title)
                        file_upload.file.save(title, ContentFile(data))
                        file_upload.save()
                        finding.files.add(file_upload)

                # existing findings may be from before we had component_name/version fields
                finding.component_name = finding.component_name if finding.component_name else component_name
                finding.component_version = finding.component_version if finding.component_version else component_version

                # finding = new finding or existing finding still in the upload report
                # to avoid pushing a finding group multiple times, we push those outside of the loop
                if settings.FEATURE_FINDING_GROUPS and finding.finding_group:
                    finding.save()
                else:
                    finding.save(push_to_jira=push_to_jira)

        # Findings that existed before the re-import but were neither reactivated nor
        # matched as unchanged are candidates for mitigation; unchanged findings not
        # in that set are reported as untouched.
        to_mitigate = set(original_items) - set(reactivated_items) - set(
            unchanged_items)
        untouched = set(unchanged_items) - set(to_mitigate)

        if settings.FEATURE_FINDING_GROUPS and push_to_jira:
            for finding_group in set([
                    finding.finding_group for finding in reactivated_items +
                    unchanged_items + new_items
                    if finding.finding_group is not None
            ]):
                jira_helper.push_to_jira(finding_group)
        sync = kwargs.get('sync', False)
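        # When not called synchronously, hand back JSON-serialized copies of the
        # findings instead of model instances.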
        if not sync:
            serialized_new_items = [
                serializers.serialize('json', [
                    finding,
                ]) for finding in new_items
            ]
            serialized_reactivated_items = [
                serializers.serialize('json', [
                    finding,
                ]) for finding in reactivated_items
            ]
            serialized_to_mitigate = [
                serializers.serialize('json', [
                    finding,
                ]) for finding in to_mitigate
            ]
            serialized_untouched = [
                serializers.serialize('json', [
                    finding,
                ]) for finding in untouched
            ]
            return serialized_new_items, serialized_reactivated_items, serialized_to_mitigate, serialized_untouched

        return new_items, reactivated_items, to_mitigate, untouched
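
A rough usage sketch for the re-import variant above; this is a minimal illustration only, in which reimporter, test, parsed_findings and request.user are assumed placeholder names rather than part of the example:

    # hypothetical call site; sync=True asks for model instances instead of JSON strings
    new_items, reactivated, to_mitigate, untouched = reimporter.process_parsed_findings(
        test,
        parsed_findings,
        'ZAP Scan',          # scan_type string (placeholder)
        request.user,
        active=True,
        verified=False,
        minimum_severity='Low',
        push_to_jira=False,
        sync=True)
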
Example #2
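This second example looks like the corresponding first-time import path: every parsed finding is saved as a new finding, its active/verified flags are combined with the flags passed by the caller, and there is no matching against existing findings, so no reactivation or mitigation bookkeeping is needed.
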
    def process_parsed_findings(self,
                                test,
                                parsed_findings,
                                scan_type,
                                user,
                                active,
                                verified,
                                minimum_severity=None,
                                endpoints_to_add=None,
                                push_to_jira=None,
                                group_by=None,
                                now=timezone.now(),
                                service=None,
                                scan_date=None,
                                **kwargs):
        logger.debug('endpoints_to_add: %s', endpoints_to_add)
        new_findings = []
        items = parsed_findings
        logger.debug('starting import of %i items.',
                     len(items) if items else 0)
        i = 0
        for item in items:
            # FIXME hack to remove when all parsers have unit tests for this attribute
            if item.severity.lower().startswith(
                    'info') and item.severity != 'Info':
                item.severity = 'Info'

            item.numerical_severity = Finding.get_numerical_severity(
                item.severity)

            if minimum_severity and (Finding.SEVERITIES[item.severity] >
                                     Finding.SEVERITIES[minimum_severity]):
                # finding's severity is below the configured threshold: ignoring the finding
                continue

            item.test = test
            item.reporter = user if user else get_current_user()
            item.last_reviewed = now
            item.last_reviewed_by = user if user else get_current_user()

            logger.debug(
                'process_parsed_findings: active from report: %s, verified from report: %s',
                item.active, item.verified)
            # active, verified parameters = parameters from the gui or api call.
            # item.active, item.verified = values from the report / the parser
            # if either value of active (from the parser or from the api/gui) is false, final status is inactive
            #   else final status is active
            # if either value of verified (from the parser or from the api/gui) is false, final status is not verified
            #   else final status is verified
            # Note that:
            #   - the API (active/verified parameters) values default to True if not specified
            #   - the parser values default to true if not set by the parser (as per the default value in models.py)
            #   - there is no "not specified" in the GUI (not ticked means not active/not verified)
            if item.active:
                item.active = active
            if item.verified:
                item.verified = verified
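            # e.g. the parser reports active=True but the caller passed active=False:
            # the finding is stored inactive; both sources must be True for the final
            # value to be True.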

            # if scan_date was provided, override value from parser
            if scan_date:
                item.date = scan_date

            item.service = service

            item.save(dedupe_option=False)

            if settings.FEATURE_FINDING_GROUPS and group_by:
                finding_helper.add_finding_to_auto_group(item, group_by)

            if (hasattr(item, 'unsaved_req_resp')
                    and len(item.unsaved_req_resp) > 0):
                for req_resp in item.unsaved_req_resp:
                    burp_rr = BurpRawRequestResponse(
                        finding=item,
                        burpRequestBase64=base64.b64encode(
                            req_resp["req"].encode("utf-8")),
                        burpResponseBase64=base64.b64encode(
                            req_resp["resp"].encode("utf-8")))
                    burp_rr.clean()
                    burp_rr.save()

            if (item.unsaved_request is not None
                    and item.unsaved_response is not None):
                burp_rr = BurpRawRequestResponse(
                    finding=item,
                    burpRequestBase64=base64.b64encode(
                        item.unsaved_request.encode()),
                    burpResponseBase64=base64.b64encode(
                        item.unsaved_response.encode()))
                burp_rr.clean()
                burp_rr.save()

            if settings.ASYNC_FINDING_IMPORT:
                importer_utils.chunk_endpoints_and_disperse(
                    item, test, item.unsaved_endpoints)
            else:
                importer_utils.add_endpoints_to_unsaved_finding(
                    item, test, item.unsaved_endpoints, sync=True)

            if endpoints_to_add:
                if settings.ASYNC_FINDING_IMPORT:
                    importer_utils.chunk_endpoints_and_disperse(
                        item, test, endpoints_to_add)
                else:
                    importer_utils.add_endpoints_to_unsaved_finding(
                        item, test, endpoints_to_add, sync=True)

            if item.unsaved_tags:
                item.tags = item.unsaved_tags

            if item.unsaved_files:
                for unsaved_file in item.unsaved_files:
                    data = base64.b64decode(unsaved_file.get('data'))
                    title = unsaved_file.get('title', '<No title>')
                    file_upload, file_upload_created = FileUpload.objects.get_or_create(
                        title=title)
                    file_upload.file.save(title, ContentFile(data))
                    file_upload.save()
                    item.files.add(file_upload)

            new_findings.append(item)
            # to avoid pushing a finding group multiple times, we push those outside of the loop
            if settings.FEATURE_FINDING_GROUPS and item.finding_group:
                item.save()
            else:
                item.save(push_to_jira=push_to_jira)

        if settings.FEATURE_FINDING_GROUPS and push_to_jira:
            for finding_group in set([
                    finding.finding_group for finding in new_findings
                    if finding.finding_group is not None
            ]):
                jira_helper.push_to_jira(finding_group)
        sync = kwargs.get('sync', False)
        if not sync:
            return [
                serializers.serialize('json', [
                    finding,
                ]) for finding in new_findings
            ]
        return new_findings
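
Taken together, the two variants differ mainly in bookkeeping: the re-import version (Example #1) matches incoming findings against what is already attached to the test and reports reactivated, to-mitigate and untouched findings, while the plain import version (Example #2) creates every parsed finding as new and returns only that list, JSON-serialized unless sync=True is passed.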