Example #1
def update_endpoint_status(existing_finding, new_finding, user):
    # New endpoints are already added in serializers.py / views.py (see comment "# for existing findings: make sure endpoints are present or created")
    # So we only need to mitigate endpoints that are no longer present
    existing_finding_endpoint_status_list = existing_finding.endpoint_status.all()
    new_finding_endpoints_list = new_finding.unsaved_endpoints
    endpoint_status_to_mitigate = [
        endpoint_status
        for endpoint_status in existing_finding_endpoint_status_list
        if endpoint_status.endpoint not in new_finding_endpoints_list
    ]
    # Determine if this can be run async
    if settings.ASYNC_FINDING_IMPORT:
        chunk_list = importer_utils.chunk_list(endpoint_status_to_mitigate)
        # If there is only one chunk, then do not bother with async
        if len(chunk_list) < 2:
            mitigate_endpoint_status(endpoint_status_to_mitigate, user, kwuser=user, sync=True)
            return
        # First kick off all the workers
        for endpoint_status_list in chunk_list:
            mitigate_endpoint_status(endpoint_status_list, user, kwuser=user, sync=False)
    else:
        mitigate_endpoint_status(endpoint_status_to_mitigate, user, kwuser=user, sync=True)
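
Note: the sync/async dispatch above relies on a batching helper. Below is a minimal, hypothetical sketch of what importer_utils.chunk_list might look like; the real helper and its chunk size may differ, so treat it as an illustration only.

# Hypothetical sketch of a chunking helper in the spirit of importer_utils.chunk_list.
# The actual implementation and chunk size may differ; 100 is an assumed value.
def chunk_list(items, chunk_size=100):
    items = list(items)  # also accepts querysets
    # split into consecutive slices of at most chunk_size elements
    return [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]

# e.g. 250 endpoint statuses -> chunks of 100, 100 and 50, so the code above
# would dispatch three mitigate_endpoint_status tasks in the async branch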
Example #2
def update_endpoint_status(existing_finding, new_finding, user):
    # New endpoints are already added in serializers.py / views.py (see comment "# for existing findings: make sure endpoints are present or created")
    # So we only need to mitigate endpoints that are no longer present
    # using `.all()` will also mark as mitigated any `endpoint_status` flagged as `false_positive`, `out_of_scope` or `risk_accepted`. This is a known issue. This is not a bug; this is a feature.
    existing_finding_endpoint_status_list = existing_finding.endpoint_status.all()
    new_finding_endpoints_list = new_finding.unsaved_endpoints
    endpoint_status_to_mitigate = list(
        filter(
            lambda existing_finding_endpoint_status: existing_finding_endpoint_status.endpoint not in new_finding_endpoints_list,
            existing_finding_endpoint_status_list)
    )
    # Determine if this can be run async
    if settings.ASYNC_FINDING_IMPORT:
        chunk_list = importer_utils.chunk_list(endpoint_status_to_mitigate)
        # If there is only one chunk, then do not bother with async
        if len(chunk_list) < 2:
            mitigate_endpoint_status(endpoint_status_to_mitigate, user, kwuser=user, sync=True)
            return
        # First kick off all the workers
        for endpoint_status_list in chunk_list:
            mitigate_endpoint_status(endpoint_status_list, user, kwuser=user, sync=False)
    else:
        mitigate_endpoint_status(endpoint_status_to_mitigate, user, kwuser=user, sync=True)
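
The comment above notes that taking .all() also mitigates endpoint statuses flagged as false_positive, out_of_scope or risk_accepted, and that this is intentional. For contrast, a hypothetical narrower queryset that would skip those flags is sketched below; this is not what update_endpoint_status does, and the flag field names are assumed from the comment.

# Hypothetical alternative (NOT the behavior above): skip flagged endpoint statuses.
# The field names false_positive / out_of_scope / risk_accepted are assumptions.
def endpoint_statuses_still_relevant(existing_finding):
    return existing_finding.endpoint_status.exclude(
        false_positive=True).exclude(out_of_scope=True).exclude(risk_accepted=True)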
Example #3
    def reimport_scan(self,
                      scan,
                      scan_type,
                      test,
                      active=True,
                      verified=True,
                      tags=None,
                      minimum_severity=None,
                      user=None,
                      endpoints_to_add=None,
                      scan_date=None,
                      version=None,
                      branch_tag=None,
                      build_id=None,
                      commit_hash=None,
                      push_to_jira=None,
                      close_old_findings=True,
                      group_by=None,
                      api_scan_configuration=None,
                      service=None):

        logger.debug(f'REIMPORT_SCAN: parameters: {locals()}')

        user = user or get_current_user()

        now = timezone.now()

        if api_scan_configuration:
            if api_scan_configuration.product != test.engagement.product:
                raise ValidationError(
                    'API Scan Configuration has to be from the same product as the Test'
                )
            if test.api_scan_configuration != api_scan_configuration:
                test.api_scan_configuration = api_scan_configuration
                test.save()

        # check if the parser that handles the scan_type manages tests
        parser = get_parser(scan_type)
        if hasattr(parser, 'get_tests'):
            logger.debug('REIMPORT_SCAN parser v2: Create parse findings')
            tests = parser.get_tests(scan_type, scan)
            # for now we only consider the first test in the list and artificially aggregate the findings of all tests
            # this matches the old behavior, as the current import/reimport implementation doesn't handle the case
            # where there is more than one test
            parsed_findings = []
            for test_raw in tests:
                parsed_findings.extend(test_raw.findings)
        else:
            logger.debug('REIMPORT_SCAN: Parse findings')
            parsed_findings = parser.get_findings(scan, test)

        logger.debug('REIMPORT_SCAN: Processing findings')
        new_findings = []
        reactivated_findings = []
        findings_to_mitigate = []
        untouched_findings = []
        if settings.ASYNC_FINDING_IMPORT:
            chunk_list = importer_utils.chunk_list(parsed_findings)
            results_list = []
            # First kick off all the workers
            for findings_list in chunk_list:
                result = self.process_parsed_findings(
                    test,
                    findings_list,
                    scan_type,
                    user,
                    active,
                    verified,
                    minimum_severity=minimum_severity,
                    endpoints_to_add=endpoints_to_add,
                    push_to_jira=push_to_jira,
                    group_by=group_by,
                    now=now,
                    service=service,
                    scan_date=scan_date,
                    sync=False)
                # Since I don't want to wait until the task is done right now, save the id
                # so I can check on the task later
                results_list += [result]
            # After all tasks have been started, time to pull the results
            logger.debug('REIMPORT_SCAN: Collecting Findings')
            for results in results_list:
                serial_new_findings, serial_reactivated_findings, serial_findings_to_mitigate, serial_untouched_findings = results.get()
                new_findings += [
                    next(serializers.deserialize("json", finding)).object
                    for finding in serial_new_findings
                ]
                reactivated_findings += [
                    next(serializers.deserialize("json", finding)).object
                    for finding in serial_reactivated_findings
                ]
                findings_to_mitigate += [
                    next(serializers.deserialize("json", finding)).object
                    for finding in serial_findings_to_mitigate
                ]
                untouched_findings += [
                    next(serializers.deserialize("json", finding)).object
                    for finding in serial_untouched_findings
                ]
            logger.debug('REIMPORT_SCAN: All Findings Collected')
            # Indicate that the test is not complete yet as endpoints will still be rolling in.
            test.percent_complete = 50
            test.save()
            importer_utils.update_test_progress(test)
        else:
            new_findings, reactivated_findings, findings_to_mitigate, untouched_findings = \
                self.process_parsed_findings(test, parsed_findings, scan_type, user, active, verified,
                                             minimum_severity=minimum_severity, endpoints_to_add=endpoints_to_add,
                                             push_to_jira=push_to_jira, group_by=group_by, now=now, service=service, scan_date=scan_date, sync=True)

        closed_findings = []
        if close_old_findings:
            logger.debug(
                'REIMPORT_SCAN: Closing findings no longer present in scan report'
            )
            closed_findings = self.close_old_findings(
                test,
                findings_to_mitigate,
                scan_date,
                user=user,
                push_to_jira=push_to_jira)

        logger.debug('REIMPORT_SCAN: Updating test/engagement timestamps')
        importer_utils.update_timestamps(test, version, branch_tag, build_id,
                                         commit_hash, now, scan_date)

        if settings.TRACK_IMPORT_HISTORY:
            logger.debug('REIMPORT_SCAN: Updating Import History')
            importer_utils.update_import_history(
                Test_Import.REIMPORT_TYPE, active, verified, tags,
                minimum_severity, endpoints_to_add, version, branch_tag,
                build_id, commit_hash, push_to_jira, close_old_findings, test,
                new_findings, closed_findings, reactivated_findings)

        logger.debug('REIMPORT_SCAN: Generating notifications')

        updated_count = len(closed_findings) + len(reactivated_findings) + len(new_findings)
        if updated_count > 0:
            notifications_helper.notify_scan_added(
                test,
                updated_count,
                new_findings=new_findings,
                findings_mitigated=closed_findings,
                findings_reactivated=reactivated_findings,
                findings_untouched=untouched_findings)

        logger.debug('REIMPORT_SCAN: Done')

        return (test, updated_count, len(new_findings), len(closed_findings),
                len(reactivated_findings), len(untouched_findings))
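
In the ASYNC_FINDING_IMPORT branch above, each worker hands its findings back as JSON strings and the collecting loop turns them into model instances again with Django's serialization framework. A minimal sketch of that round-trip, mirroring the calls used in reimport_scan and process_parsed_findings:

# Sketch of the serialize/deserialize round-trip used to pass findings between
# the async workers and the collecting loop above (illustrative only).
from django.core import serializers

def pack(findings):
    # each finding is serialized on its own, as in process_parsed_findings
    return [serializers.serialize("json", [finding]) for finding in findings]

def unpack(serialized_findings):
    # each JSON document wraps exactly one object, hence the single next()
    return [next(serializers.deserialize("json", blob)).object
            for blob in serialized_findings]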
Example #4
    def process_parsed_findings(self,
                                test,
                                parsed_findings,
                                scan_type,
                                user,
                                active,
                                verified,
                                minimum_severity=None,
                                endpoints_to_add=None,
                                push_to_jira=None,
                                group_by=None,
                                now=None,
                                service=None,
                                scan_date=None,
                                **kwargs):

        # avoid the stale default that evaluating timezone.now() at definition time would give
        now = now or timezone.now()
        items = parsed_findings
        original_items = list(test.finding_set.all())
        new_items = []
        mitigated_count = 0
        finding_count = 0
        finding_added_count = 0
        reactivated_count = 0
        reactivated_items = []
        unchanged_count = 0
        unchanged_items = []

        logger.debug('starting reimport of %i items.',
                     len(items) if items else 0)
        from dojo.importers.reimporter.utils import (
            get_deduplication_algorithm_from_conf,
            match_new_finding_to_existing_finding, update_endpoint_status,
            reactivate_endpoint_status)
        deduplication_algorithm = get_deduplication_algorithm_from_conf(
            scan_type)

        i = 0
        logger.debug(
            'STEP 1: looping over findings from the reimported report and trying to match them to existing findings'
        )
        deduplicationLogger.debug(
            'Algorithm used for matching new findings to existing findings: %s',
            deduplication_algorithm)
        for item in items:
            # FIXME hack to remove when all parsers have unit tests for this attribute
            if item.severity.lower().startswith('info') and item.severity != 'Info':
                item.severity = 'Info'

            item.numerical_severity = Finding.get_numerical_severity(item.severity)

            if minimum_severity and (Finding.SEVERITIES[item.severity] >
                                     Finding.SEVERITIES[minimum_severity]):
                # finding's severity is below the configured threshold: ignoring the finding
                continue

            # existing findings may be from before we had component_name/version fields
            component_name = getattr(item, 'component_name', None)
            component_version = getattr(item, 'component_version', None)

            if not hasattr(item, 'test'):
                item.test = test

            item.service = service

            item.hash_code = item.compute_hash_code()
            deduplicationLogger.debug("item's hash_code: %s", item.hash_code)

            findings = match_new_finding_to_existing_finding(
                item, test, deduplication_algorithm, scan_type)

            deduplicationLogger.debug(
                'found %i findings matching with current new finding',
                len(findings))

            if findings:
                # existing finding found
                finding = findings[0]
                if finding.false_p or finding.out_of_scope or finding.risk_accepted:
                    logger.debug(
                        '%i: skipping existing finding (it is marked as false positive:%s and/or out of scope:%s or is a risk accepted:%s): %i:%s:%s:%s',
                        i, finding.false_p, finding.out_of_scope,
                        finding.risk_accepted, finding.id, finding,
                        finding.component_name, finding.component_version)
                elif finding.mitigated or finding.is_mitigated:
                    logger.debug('%i: reactivating: %i:%s:%s:%s', i,
                                 finding.id, finding, finding.component_name,
                                 finding.component_version)
                    finding.mitigated = None
                    finding.is_mitigated = False
                    finding.mitigated_by = None
                    finding.active = True
                    finding.verified = verified

                    # existing findings may be from before we had component_name/version fields
                    finding.component_name = finding.component_name or component_name
                    finding.component_version = finding.component_version or component_version

                    # don't dedupe before endpoints are added
                    finding.save(dedupe_option=False)
                    note = Notes(entry="Re-activated by %s re-upload." % scan_type, author=user)
                    note.save()

                    endpoint_statuses = finding.endpoint_status.all()

                    # Determine if this can be run async
                    if settings.ASYNC_FINDING_IMPORT:
                        chunk_list = importer_utils.chunk_list(endpoint_statuses)
                        # If there is only one chunk, then do not bother with async
                        if len(chunk_list) < 2:
                            reactivate_endpoint_status(endpoint_statuses, sync=True)
                        else:
                            logger.debug('IMPORT_SCAN: Split endpoints into ' +
                                         str(len(chunk_list)) + ' chunks of ' +
                                         str(len(chunk_list[0])))
                            # First kick off all the workers
                            for endpoint_status_list in chunk_list:
                                reactivate_endpoint_status(endpoint_status_list, sync=False)
                    else:
                        reactivate_endpoint_status(endpoint_statuses, sync=True)

                    finding.notes.add(note)
                    reactivated_items.append(finding)
                    reactivated_count += 1
                else:
                    # existing findings may be from before we had component_name/version fields
                    logger.debug('%i: updating existing finding: %i:%s:%s:%s',
                                 i, finding.id, finding,
                                 finding.component_name,
                                 finding.component_version)
                    if not finding.component_name or not finding.component_version:
                        finding.component_name = finding.component_name or component_name
                        finding.component_version = finding.component_version or component_version
                        finding.save(dedupe_option=False)

                    unchanged_items.append(finding)
                    unchanged_count += 1
                if finding.dynamic_finding:
                    logger.debug(
                        "Re-import found an existing dynamic finding for this new finding. Checking the status of endpoints"
                    )
                    update_endpoint_status(finding, item, user)
            else:
                # no existing finding found
                item.reporter = user
                item.last_reviewed = timezone.now()
                item.last_reviewed_by = user
                item.verified = verified
                item.active = active

                # if scan_date was provided, override value from parser
                if scan_date:
                    item.date = scan_date

                # Save it. Don't dedupe before endpoints are added.
                item.save(dedupe_option=False)
                logger.debug(
                    '%i: reimport created new finding as no existing finding match: %i:%s:%s:%s',
                    i, item.id, item, item.component_name,
                    item.component_version)

                # only new items get auto grouped to avoid confusion around already existing items that are already grouped
                if settings.FEATURE_FINDING_GROUPS and group_by:
                    finding_helper.add_finding_to_auto_group(item, group_by)

                finding_added_count += 1
                new_items.append(item)
                finding = item

                if hasattr(item, 'unsaved_req_resp'):
                    for req_resp in item.unsaved_req_resp:
                        burp_rr = BurpRawRequestResponse(
                            finding=finding,
                            burpRequestBase64=base64.b64encode(
                                req_resp["req"].encode("utf-8")),
                            burpResponseBase64=base64.b64encode(
                                req_resp["resp"].encode("utf-8")))
                        burp_rr.clean()
                        burp_rr.save()

                if item.unsaved_request and item.unsaved_response:
                    burp_rr = BurpRawRequestResponse(
                        finding=finding,
                        burpRequestBase64=base64.b64encode(
                            item.unsaved_request.encode()),
                        burpResponseBase64=base64.b64encode(
                            item.unsaved_response.encode()))
                    burp_rr.clean()
                    burp_rr.save()

            # for existing findings: make sure endpoints are present or created
            if finding:
                finding_count += 1
                if settings.ASYNC_FINDING_IMPORT:
                    importer_utils.chunk_endpoints_and_disperse(
                        finding, test, item.unsaved_endpoints)
                else:
                    importer_utils.add_endpoints_to_unsaved_finding(
                        finding, test, item.unsaved_endpoints, sync=True)

                if endpoints_to_add:
                    if settings.ASYNC_FINDING_IMPORT:
                        importer_utils.chunk_endpoints_and_disperse(
                            finding, test, endpoints_to_add)
                    else:
                        importer_utils.add_endpoints_to_unsaved_finding(
                            finding, test, endpoints_to_add, sync=True)

                if item.unsaved_tags:
                    finding.tags = item.unsaved_tags

                if item.unsaved_files:
                    for unsaved_file in item.unsaved_files:
                        data = base64.b64decode(unsaved_file.get('data'))
                        title = unsaved_file.get('title', '<No title>')
                        file_upload, file_upload_created = FileUpload.objects.get_or_create(title=title)
                        file_upload.file.save(title, ContentFile(data))
                        file_upload.save()
                        finding.files.add(file_upload)

                # existing findings may be from before we had component_name/version fields
                finding.component_name = finding.component_name or component_name
                finding.component_version = finding.component_version or component_version

                # finding = new finding or existing finding still in the upload report
                # to avoid pushing a finding group multiple times, we push those outside of the loop
                if settings.FEATURE_FINDING_GROUPS and finding.finding_group:
                    finding.save()
                else:
                    finding.save(push_to_jira=push_to_jira)

            # keep the per-finding index used in the debug messages above in sync
            i += 1

        to_mitigate = set(original_items) - set(reactivated_items) - set(unchanged_items)
        untouched = set(unchanged_items) - set(to_mitigate)

        if settings.FEATURE_FINDING_GROUPS and push_to_jira:
            for finding_group in {finding.finding_group
                                  for finding in reactivated_items + unchanged_items + new_items
                                  if finding.finding_group is not None}:
                jira_helper.push_to_jira(finding_group)
        sync = kwargs.get('sync', False)
        if not sync:
            serialized_new_items = [serializers.serialize('json', [finding]) for finding in new_items]
            serialized_reactivated_items = [serializers.serialize('json', [finding]) for finding in reactivated_items]
            serialized_to_mitigate = [serializers.serialize('json', [finding]) for finding in to_mitigate]
            serialized_untouched = [serializers.serialize('json', [finding]) for finding in untouched]
            return serialized_new_items, serialized_reactivated_items, serialized_to_mitigate, serialized_untouched

        return new_items, reactivated_items, to_mitigate, untouched
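
The to_mitigate and untouched sets at the end of process_parsed_findings are plain set arithmetic over the findings that were already on the test and those matched during this reimport. A small worked example with integers standing in for Finding objects:

# Worked illustration of the set arithmetic above, with integers in place of findings.
original_items = [1, 2, 3, 4]      # findings already present on the test
reactivated_items = [2]            # matched in the report and reactivated
unchanged_items = [3, 4]           # matched in the report and left as-is

to_mitigate = set(original_items) - set(reactivated_items) - set(unchanged_items)
untouched = set(unchanged_items) - set(to_mitigate)

assert to_mitigate == {1}          # no longer present in the report
assert untouched == {3, 4}         # still present, nothing changed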
Example #5
    def import_scan(self,
                    scan,
                    scan_type,
                    engagement,
                    lead,
                    environment,
                    active,
                    verified,
                    tags=None,
                    minimum_severity=None,
                    user=None,
                    endpoints_to_add=None,
                    scan_date=None,
                    version=None,
                    branch_tag=None,
                    build_id=None,
                    commit_hash=None,
                    push_to_jira=None,
                    close_old_findings=False,
                    group_by=None,
                    api_scan_configuration=None,
                    service=None,
                    title=None):

        logger.debug(f'IMPORT_SCAN: parameters: {locals()}')

        user = user or get_current_user()

        now = timezone.now()

        if api_scan_configuration and api_scan_configuration.product != engagement.product:
            raise ValidationError(
                'API Scan Configuration has to be from the same product as the Engagement'
            )

        # check if the parser that handles the scan_type manages tests
        # if yes, we parse the data first
        # after that we customize the Test_Type to reflect the data
        # This allows us to support some meta-formats like SARIF or the generic format
        parser = get_parser(scan_type)
        if hasattr(parser, 'get_tests'):
            logger.debug(
                'IMPORT_SCAN parser v2: Create Test and parse findings')
            tests = parser.get_tests(scan_type, scan)
            # for now we only consider the first test in the list and artificially aggregate the findings of all tests
            # this matches the old behavior, as the current import/reimport implementation doesn't handle the case
            # where there is more than one test
            #
            # we also adjust the label of the Test_Type to show the user the original scan_type,
            # but only if they are different. This is to support meta-formats like SARIF,
            # so a report that has the label 'CodeScanner' will be changed to 'CodeScanner Scan (SARIF)'
            test_type_name = scan_type
            if len(tests) > 0:
                if tests[0].type:
                    test_type_name = tests[0].type + " Scan"
                    if test_type_name != scan_type:
                        test_type_name = f"{test_type_name} ({scan_type})"

                test = self.create_test(
                    scan_type,
                    test_type_name,
                    engagement,
                    lead,
                    environment,
                    scan_date=scan_date,
                    tags=tags,
                    version=version,
                    branch_tag=branch_tag,
                    build_id=build_id,
                    commit_hash=commit_hash,
                    now=now,
                    api_scan_configuration=api_scan_configuration,
                    title=title)
                # This part changes the name of the Test;
                # we get it from the data returned by the parser
                test_raw = tests[0]
                if test_raw.name:
                    test.name = test_raw.name
                if test_raw.description:
                    test.description = test_raw.description
                test.save()

                logger.debug(
                    'IMPORT_SCAN parser v2: Parse findings (aggregate)')
                # currently we only support importing one Test,
                # so for parsers that support multiple tests (like SARIF)
                # we aggregate all the findings into one single test
                parsed_findings = []
                for test_raw in tests:
                    parsed_findings.extend(test_raw.findings)
            else:
                logger.info(f'No tests found in import for {scan_type}')
                # fail explicitly: without a test we cannot continue, and the code below would raise a NameError
                raise ValidationError(f'No tests found in the import for scan type {scan_type}')
        else:
            logger.debug('IMPORT_SCAN: Create Test')
            # by default test_type == scan_type
            test = self.create_test(
                scan_type,
                scan_type,
                engagement,
                lead,
                environment,
                scan_date=scan_date,
                tags=tags,
                version=version,
                branch_tag=branch_tag,
                build_id=build_id,
                commit_hash=commit_hash,
                now=now,
                api_scan_configuration=api_scan_configuration,
                title=title)

            logger.debug('IMPORT_SCAN: Parse findings')
            parsed_findings = parser.get_findings(scan, test)

        logger.debug('IMPORT_SCAN: Processing findings')
        new_findings = []
        if settings.ASYNC_FINDING_IMPORT:
            chunk_list = importer_utils.chunk_list(parsed_findings)
            results_list = []
            # First kick off all the workers
            for findings_list in chunk_list:
                result = self.process_parsed_findings(
                    test,
                    findings_list,
                    scan_type,
                    user,
                    active,
                    verified,
                    minimum_severity=minimum_severity,
                    endpoints_to_add=endpoints_to_add,
                    push_to_jira=push_to_jira,
                    group_by=group_by,
                    now=now,
                    service=service,
                    scan_date=scan_date,
                    sync=False)
                # Since I don't want to wait until the task is done right now, save the id
                # so I can check on the task later
                results_list += [result]
            # After all tasks have been started, time to pull the results
            logger.info('IMPORT_SCAN: Collecting Findings')
            for results in results_list:
                serial_new_findings = results.get()
                new_findings += [
                    next(serializers.deserialize("json", finding)).object
                    for finding in serial_new_findings
                ]
            logger.info('IMPORT_SCAN: All Findings Collected')
            # Indicate that the test is not complete yet as endpoints will still be rolling in.
            test.percent_complete = 50
            test.save()
            importer_utils.update_test_progress(test)
        else:
            new_findings = self.process_parsed_findings(
                test,
                parsed_findings,
                scan_type,
                user,
                active,
                verified,
                minimum_severity=minimum_severity,
                endpoints_to_add=endpoints_to_add,
                push_to_jira=push_to_jira,
                group_by=group_by,
                now=now,
                service=service,
                scan_date=scan_date,
                sync=True)

        closed_findings = []
        if close_old_findings:
            logger.debug(
                'IMPORT_SCAN: Closing findings no longer present in scan report'
            )
            closed_findings = self.close_old_findings(
                test,
                scan_date,
                user=user,
                push_to_jira=push_to_jira,
                service=service)

        logger.debug('IMPORT_SCAN: Updating test/engagement timestamps')
        importer_utils.update_timestamps(test, version, branch_tag, build_id,
                                         commit_hash, now, scan_date)

        if settings.TRACK_IMPORT_HISTORY:
            logger.debug('IMPORT_SCAN: Updating Import History')
            importer_utils.update_import_history(
                Test_Import.IMPORT_TYPE, active, verified, tags,
                minimum_severity, endpoints_to_add, version, branch_tag,
                build_id, commit_hash, push_to_jira, close_old_findings, test,
                new_findings, closed_findings)

        logger.debug('IMPORT_SCAN: Generating notifications')
        notifications_helper.notify_test_created(test)
        updated_count = len(new_findings) + len(closed_findings)
        if updated_count > 0:
            notifications_helper.notify_scan_added(
                test,
                updated_count,
                new_findings=new_findings,
                findings_mitigated=closed_findings)

        logger.debug('IMPORT_SCAN: Done')

        return test, len(new_findings), len(closed_findings)
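
The Test_Type labelling in import_scan composes the scan_type with the type reported by the parser, so that meta-formats like SARIF keep the original label visible. A hypothetical helper isolating just that logic, with the example from the comment:

# Hypothetical helper extracting the Test_Type label logic from import_scan above.
def build_test_type_name(scan_type, first_test_type):
    test_type_name = scan_type
    if first_test_type:
        test_type_name = first_test_type + " Scan"
        if test_type_name != scan_type:
            test_type_name = f"{test_type_name} ({scan_type})"
    return test_type_name

# build_test_type_name('SARIF', 'CodeScanner') -> 'CodeScanner Scan (SARIF)'
# build_test_type_name('SARIF', None)          -> 'SARIF'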