Code example #1
def add_endpoints_to_unsaved_finding(finding, test, endpoints, **kwargs):
    logger.debug('IMPORT_SCAN: Adding ' + str(len(endpoints)) + ' endpoints to finding:' + str(finding))
    for endpoint in endpoints:
        try:
            endpoint.clean()
        except ValidationError as e:
            logger.warning("DefectDojo is storing broken endpoint because cleaning wasn't successful: "
                            "{}".format(e))
        ep = None
        try:
            ep, created = endpoint_get_or_create(
                protocol=endpoint.protocol,
                userinfo=endpoint.userinfo,
                host=endpoint.host,
                port=endpoint.port,
                path=endpoint.path,
                query=endpoint.query,
                fragment=endpoint.fragment,
                product=test.engagement.product)
        except (MultipleObjectsReturned):
            raise Exception("Endpoints in your database are broken. Please access {} and migrate them to new format or "
                            "remove them.".format(reverse('endpoint_migrate')))

        # Link the endpoint to this finding through an Endpoint_Status record; newly created statuses inherit the finding's date below
        eps, created = Endpoint_Status.objects.get_or_create(
            finding=finding,
            endpoint=ep)
        if created:
            eps.date = finding.date
            eps.save()

        if ep and eps:
            ep.endpoint_status.add(eps)
            finding.endpoint_status.add(eps)
            finding.endpoints.add(ep)
    logger.debug('IMPORT_SCAN: ' + str(len(endpoints)) + ' imported')
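
For orientation, a hedged usage sketch of the helper above; the parsed_findings list and the test object are hypothetical placeholders, not code from the project (unsaved_endpoints is the attribute the importer code below reads from each parsed finding):

# Hypothetical caller: attach each parsed finding's unsaved endpoints
# before the finding goes through import post-processing.
for finding in parsed_findings:
    add_endpoints_to_unsaved_finding(finding, test, finding.unsaved_endpoints)
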
Code example #2
    def test_get_or_create(self):
        endpoint1, created1 = endpoint_get_or_create(protocol='http',
                                                     host='bar.foo')
        self.assertTrue(created1)

        endpoint2, created2 = endpoint_get_or_create(protocol='http',
                                                     host='bar.foo')
        self.assertFalse(created2)

        endpoint3, created3 = endpoint_get_or_create(protocol='http',
                                                     host='bar.foo',
                                                     port=80)
        self.assertFalse(created3)

        endpoint4, created4 = endpoint_get_or_create(protocol='http',
                                                     host='bar.foo',
                                                     port=8080)
        self.assertTrue(created4)

        endpoint5, created5 = endpoint_get_or_create(protocol='https',
                                                     host='bar.foo',
                                                     port=443)
        self.assertTrue(created5)

        endpoint6, created6 = endpoint_get_or_create(protocol='https',
                                                     host='bar.foo')
        self.assertFalse(created6)

        endpoint7, created7 = endpoint_get_or_create(protocol='https',
                                                     host='bar.foo',
                                                     port=8443)
        self.assertTrue(created7)
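
The test above implies that a missing port is treated as the scheme's default when matching endpoints (http falls back to 80, https to 443), which is why bar.foo over http matches bar.foo:80 but not bar.foo:8080. A minimal sketch of that normalization, using a hypothetical helper rather than the project's actual implementation:

# Hypothetical illustration of the default-port fallback the test relies on.
DEFAULT_PORTS = {'http': 80, 'https': 443}

def normalized_key(protocol, host, port=None):
    # Fall back to the scheme's default port when none is given, so
    # ('http', 'bar.foo', None) and ('http', 'bar.foo', 80) compare equal.
    return (protocol, host, port if port is not None else DEFAULT_PORTS.get(protocol))

assert normalized_key('http', 'bar.foo') == normalized_key('http', 'bar.foo', 80)
assert normalized_key('https', 'bar.foo') == normalized_key('https', 'bar.foo', 443)
assert normalized_key('http', 'bar.foo') != normalized_key('http', 'bar.foo', 8080)
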
Code example #3
def add_endpoints_to_unsaved_finding(finding, test, endpoints, **kwargs):
    logger.debug('IMPORT_SCAN: Adding ' + str(len(endpoints)) +
                 ' endpoints to finding:' + str(finding))
    for endpoint in endpoints:
        try:
            endpoint.clean()
        except ValidationError as e:
            logger.warning(
                "DefectDojo is storing broken endpoint because cleaning wasn't successful: "
                "{}".format(e))
        ep = None
        try:
            ep, created = endpoint_get_or_create(
                protocol=endpoint.protocol,
                userinfo=endpoint.userinfo,
                host=endpoint.host,
                port=endpoint.port,
                path=endpoint.path,
                query=endpoint.query,
                fragment=endpoint.fragment,
                product=test.engagement.product)
        except (MultipleObjectsReturned):
            pass

        eps = None
        try:
            eps, created = Endpoint_Status.objects.get_or_create(
                finding=finding, endpoint=ep)
        except (MultipleObjectsReturned):
            pass

        # Only link when both lookups resolved; after a MultipleObjectsReturned the variables stay None and the endpoint is skipped
        if ep and eps:
            ep.endpoint_status.add(eps)
            finding.endpoint_status.add(eps)
            finding.endpoints.add(ep)
    logger.debug('IMPORT_SCAN: ' + str(len(endpoints)) + ' imported')
Code example #4
    def process_parsed_findings(self,
                                test,
                                parsed_findings,
                                scan_type,
                                user,
                                active,
                                verified,
                                minimum_severity=None,
                                endpoints_to_add=None,
                                push_to_jira=None,
                                group_by=None,
                                now=timezone.now()):
        logger.debug('endpoints_to_add: %s', endpoints_to_add)
        new_findings = []
        items = parsed_findings
        logger.debug('starting import of %i items.',
                     len(items) if items else 0)
        i = 0
        for item in items:
            sev = item.severity
            if sev == 'Information' or sev == 'Informational':
                sev = 'Info'

            item.severity = sev
            item.numerical_severity = Finding.get_numerical_severity(sev)

            if minimum_severity and (Finding.SEVERITIES[sev] >
                                     Finding.SEVERITIES[minimum_severity]):
                continue

            item.test = test
            item.reporter = user if user else get_current_user()
            item.last_reviewed = now
            item.last_reviewed_by = user if user else get_current_user()

            # Only override the active/verified flags when the parser left them at the default value (True)
            if item.active:
                item.active = active
            if item.verified:
                item.verified = verified

            item.created = now
            item.updated = now
            item.save(dedupe_option=False)

            if settings.FEATURE_FINDING_GROUPS and group_by:
                finding_helper.add_finding_to_auto_group(item, group_by)

            if (hasattr(item, 'unsaved_req_resp')
                    and len(item.unsaved_req_resp) > 0):
                for req_resp in item.unsaved_req_resp:
                    burp_rr = BurpRawRequestResponse(
                        finding=item,
                        burpRequestBase64=base64.b64encode(
                            req_resp["req"].encode("utf-8")),
                        burpResponseBase64=base64.b64encode(
                            req_resp["resp"].encode("utf-8")))
                    burp_rr.clean()
                    burp_rr.save()

            if (item.unsaved_request is not None
                    and item.unsaved_response is not None):
                burp_rr = BurpRawRequestResponse(
                    finding=item,
                    burpRequestBase64=base64.b64encode(
                        item.unsaved_request.encode()),
                    burpResponseBase64=base64.b64encode(
                        item.unsaved_response.encode()))
                burp_rr.clean()
                burp_rr.save()

            for endpoint in item.unsaved_endpoints:
                try:
                    endpoint.clean()
                except ValidationError as e:
                    logger.warning(
                        "DefectDojo is storing broken endpoint because cleaning wasn't successful: "
                        "{}".format(e))

                try:
                    ep, created = endpoint_get_or_create(
                        protocol=endpoint.protocol,
                        userinfo=endpoint.userinfo,
                        host=endpoint.host,
                        port=endpoint.port,
                        path=endpoint.path,
                        query=endpoint.query,
                        fragment=endpoint.fragment,
                        product=test.engagement.product)
                except (MultipleObjectsReturned):
                    pass

                try:
                    eps, created = Endpoint_Status.objects.get_or_create(
                        finding=item, endpoint=ep)
                except (MultipleObjectsReturned):
                    pass

                # Link the endpoint and its status record to the newly imported finding
                ep.endpoint_status.add(eps)
                item.endpoint_status.add(eps)
                item.endpoints.add(ep)

            if endpoints_to_add:
                for endpoint in endpoints_to_add:
                    logger.debug('adding endpoint %s', endpoint)
                    # TODO Not sure what happens here, we get an endpoint model and try to create it again?
                    try:
                        endpoint.clean()
                    except ValidationError as e:
                        logger.warning(
                            "DefectDojo is storing broken endpoint because cleaning wasn't successful: "
                            "{}".format(e))

                    try:
                        ep, created = endpoint_get_or_create(
                            protocol=endpoint.protocol,
                            userinfo=endpoint.userinfo,
                            host=endpoint.host,
                            port=endpoint.port,
                            path=endpoint.path,
                            query=endpoint.query,
                            fragment=endpoint.fragment,
                            product=test.engagement.product)
                    except (MultipleObjectsReturned):
                        pass
                    try:
                        eps, created = Endpoint_Status.objects.get_or_create(
                            finding=item, endpoint=ep)
                    except (MultipleObjectsReturned):
                        pass

                    ep.endpoint_status.add(eps)
                    item.endpoints.add(ep)
                    item.endpoint_status.add(eps)

            if item.unsaved_tags:
                item.tags = item.unsaved_tags

            new_findings.append(item)
            # to avoid pushing a finding group multiple times, we push those outside of the loop
            if settings.FEATURE_FINDING_GROUPS and item.finding_group:
                item.save()
            else:
                item.save(push_to_jira=push_to_jira)

        if settings.FEATURE_FINDING_GROUPS and push_to_jira:
            for finding_group in set([
                    finding.finding_group for finding in new_findings
                    if finding.finding_group is not None
            ]):
                jira_helper.push_to_jira(finding_group)

        return new_findings
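
A hedged invocation sketch for the importer method above; the importer instance, request_user, and the argument values are placeholders, not code from the project:

# Hypothetical call site: hand a parser's unsaved findings to the importer.
new_findings = importer.process_parsed_findings(
    test,                      # the Test the findings belong to
    parsed_findings,           # unsaved Finding objects produced by a parser
    scan_type,                 # scan type string chosen on upload
    request_user,
    active=True,
    verified=False,
    minimum_severity='Low',
    push_to_jira=False)
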
Code example #5
    def process_parsed_findings(self, test, parsed_findings, scan_type, user, active, verified, minimum_severity=None,
                                endpoints_to_add=None, push_to_jira=None, group_by=None, now=timezone.now()):

        items = parsed_findings
        original_items = list(test.finding_set.all())
        new_items = []
        mitigated_count = 0
        finding_count = 0
        finding_added_count = 0
        reactivated_count = 0
        reactivated_items = []
        unchanged_count = 0
        unchanged_items = []

        logger.debug('starting reimport of %i items.', len(items) if items else 0)
        from dojo.importers.reimporter.utils import (
            get_deduplication_algorithm_from_conf,
            match_new_finding_to_existing_finding, update_endpoint_status)
        deduplication_algorithm = get_deduplication_algorithm_from_conf(scan_type)

        i = 0
        logger.debug('STEP 1: looping over findings from the reimported report and trying to match them to existing findings')
        deduplicationLogger.debug('Algorithm used for matching new findings to existing findings: %s', deduplication_algorithm)
        for item in items:
            # FIXME hack to remove when all parsers have unit tests for this attribute
            if item.severity.lower().startswith('info') and item.severity != 'Info':
                item.severity = 'Info'

            item.numerical_severity = Finding.get_numerical_severity(item.severity)

            if minimum_severity and (Finding.SEVERITIES[item.severity] >
                    Finding.SEVERITIES[minimum_severity]):
                # finding's severity is below the configured threshold : ignoring the finding
                continue

            # existing findings may be from before we had component_name/version fields
            component_name = item.component_name if hasattr(item, 'component_name') else None
            component_version = item.component_version if hasattr(item, 'component_version') else None

            if not hasattr(item, 'test'):
                item.test = test

            item.hash_code = item.compute_hash_code()
            deduplicationLogger.debug("item's hash_code: %s", item.hash_code)

            findings = match_new_finding_to_existing_finding(item, test, deduplication_algorithm, scan_type)

            deduplicationLogger.debug('found %i findings matching with current new finding', len(findings))

            if findings:
                # existing finding found
                finding = findings[0]
                if finding.false_p or finding.out_of_scope or finding.risk_accepted:
                    logger.debug('%i: skipping existing finding (it is marked as false positive:%s and/or out of scope:%s or is a risk accepted:%s): %i:%s:%s:%s', i, finding.false_p, finding.out_of_scope, finding.risk_accepted, finding.id, finding, finding.component_name, finding.component_version)
                elif finding.mitigated or finding.is_mitigated:
                    logger.debug('%i: reactivating: %i:%s:%s:%s', i, finding.id, finding, finding.component_name, finding.component_version)
                    finding.mitigated = None
                    finding.is_mitigated = False
                    finding.mitigated_by = None
                    finding.active = True
                    finding.verified = verified

                    # existing findings may be from before we had component_name/version fields
                    finding.component_name = finding.component_name if finding.component_name else component_name
                    finding.component_version = finding.component_version if finding.component_version else component_version

                    # don't dedupe before endpoints are added
                    finding.save(dedupe_option=False)
                    note = Notes(
                        entry="Re-activated by %s re-upload." % scan_type,
                        author=user)
                    note.save()
                    endpoint_status = finding.endpoint_status.all()
                    for status in endpoint_status:
                        status.mitigated_by = None
                        status.mitigated_time = None
                        status.mitigated = False
                        status.last_modified = timezone.now()
                        status.save()
                    finding.notes.add(note)
                    reactivated_items.append(finding)
                    reactivated_count += 1
                else:
                    # existing findings may be from before we had component_name/version fields
                    logger.debug('%i: updating existing finding: %i:%s:%s:%s', i, finding.id, finding, finding.component_name, finding.component_version)
                    if not finding.component_name or not finding.component_version:
                        finding.component_name = finding.component_name if finding.component_name else component_name
                        finding.component_version = finding.component_version if finding.component_version else component_version
                        finding.save(dedupe_option=False)

                    unchanged_items.append(finding)
                    unchanged_count += 1
                if finding.dynamic_finding:
                    logger.debug("Re-import found an existing dynamic finding for this new finding. Checking the status of endpoints")
                    update_endpoint_status(finding, item, user)
            else:
                # no existing finding found
                item.reporter = user
                item.last_reviewed = timezone.now()
                item.last_reviewed_by = user
                item.verified = verified
                item.active = active
                # Save it. Don't dedupe before endpoints are added.
                item.save(dedupe_option=False)
                logger.debug('%i: reimport created new finding as no existing finding match: %i:%s:%s:%s', i, item.id, item, item.component_name, item.component_version)

                # only new items get auto grouped to avoid confusion around already existing items that are already grouped
                if settings.FEATURE_FINDING_GROUPS and group_by:
                    finding_helper.add_finding_to_auto_group(item, group_by)

                finding_added_count += 1
                new_items.append(item)
                finding = item

                if hasattr(item, 'unsaved_req_resp'):
                    for req_resp in item.unsaved_req_resp:
                        burp_rr = BurpRawRequestResponse(
                            finding=finding,
                            burpRequestBase64=base64.b64encode(req_resp["req"].encode("utf-8")),
                            burpResponseBase64=base64.b64encode(req_resp["resp"].encode("utf-8")))
                        burp_rr.clean()
                        burp_rr.save()

                if item.unsaved_request and item.unsaved_response:
                    burp_rr = BurpRawRequestResponse(
                        finding=finding,
                        burpRequestBase64=base64.b64encode(item.unsaved_request.encode()),
                        burpResponseBase64=base64.b64encode(item.unsaved_response.encode()))
                    burp_rr.clean()
                    burp_rr.save()

            # for existing findings: make sure endpoints are present or created
            if finding:
                finding_count += 1
                for endpoint in item.unsaved_endpoints:
                    try:
                        endpoint.clean()
                    except ValidationError as e:
                        logger.warning("DefectDojo is storing broken endpoint because cleaning wasn't successful: "
                                       "{}".format(e))

                    try:
                        ep, created = endpoint_get_or_create(
                            protocol=endpoint.protocol,
                            userinfo=endpoint.userinfo,
                            host=endpoint.host,
                            port=endpoint.port,
                            path=endpoint.path,
                            query=endpoint.query,
                            fragment=endpoint.fragment,
                            product=test.engagement.product)
                    except (MultipleObjectsReturned):
                        pass

                    try:
                        eps, created = Endpoint_Status.objects.get_or_create(
                            finding=finding,
                            endpoint=ep)
                    except (MultipleObjectsReturned):
                        pass

                    ep.endpoint_status.add(eps)
                    finding.endpoints.add(ep)
                    finding.endpoint_status.add(eps)

                if endpoints_to_add:
                    for endpoint in endpoints_to_add:
                        # TODO Not sure what happens here, we get an endpoint model and try to create it again?
                        try:
                            endpoint.clean()
                        except ValidationError as e:
                            logger.warning("DefectDojo is storing broken endpoint because cleaning wasn't successful: "
                                           "{}".format(e))

                        try:
                            ep, created = endpoint_get_or_create(
                                protocol=endpoint.protocol,
                                userinfo=endpoint.userinfo,
                                host=endpoint.host,
                                port=endpoint.port,
                                path=endpoint.path,
                                query=endpoint.query,
                                fragment=endpoint.fragment,
                                product=test.engagement.product)
                        except (MultipleObjectsReturned):
                            pass
                        try:
                            eps, created = Endpoint_Status.objects.get_or_create(
                                finding=finding,
                                endpoint=ep)
                        except (MultipleObjectsReturned):
                            pass

                        ep.endpoint_status.add(eps)
                        finding.endpoints.add(ep)
                        finding.endpoint_status.add(eps)

                if item.unsaved_tags:
                    finding.tags = item.unsaved_tags

                # existing findings may be from before we had component_name/version fields
                finding.component_name = finding.component_name if finding.component_name else component_name
                finding.component_version = finding.component_version if finding.component_version else component_version

                # finding = new finding or existing finding still in the upload report
                # to avoid pushing a finding group multiple times, we push those outside of the loop
                if settings.FEATURE_FINDING_GROUPS and finding.finding_group:
                    finding.save()
                else:
                    finding.save(push_to_jira=push_to_jira)

        # Findings that existed before the reimport but were neither reactivated nor matched as unchanged become candidates for mitigation
        to_mitigate = set(original_items) - set(reactivated_items) - set(unchanged_items)
        untouched = set(unchanged_items) - set(to_mitigate)

        if settings.FEATURE_FINDING_GROUPS and push_to_jira:
            for finding_group in set([finding.finding_group for finding in reactivated_items + unchanged_items + new_items if finding.finding_group is not None]):
                jira_helper.push_to_jira(finding_group)

        return new_items, reactivated_items, to_mitigate, untouched
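
For intuition on the set arithmetic above, a small hedged illustration with made-up finding IDs standing in for Finding objects:

# Hypothetical values: findings 1-4 existed before the reimport;
# the new report reactivated finding 2 and matched finding 3 unchanged.
original_items = {1, 2, 3, 4}
reactivated_items = {2}
unchanged_items = {3}
to_mitigate = original_items - reactivated_items - unchanged_items  # {1, 4}
untouched = unchanged_items - to_mitigate                           # {3}
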