Code example #1
File: notify.py Project: haojiang1/sentry
    def rule_notify(self, event, futures):
        rules = []
        for future in futures:
            rules.append(future.rule)
            if not future.kwargs:
                continue
            raise NotImplementedError('The default behavior for notification de-duplication does not support args')

        if hasattr(self, 'notify_digest'):
            project = event.group.project

            # If digest delivery is disabled, we still need to send a
            # notification -- we also need to check rate limits, since
            # ``should_notify`` skips this step if the plugin supports digests.
            if not features.has('projects:digests:deliver', project):
                if self.__is_rate_limited(event.group, event):
                    logger = logging.getLogger('sentry.plugins.{0}'.format(self.get_conf_key()))
                    logger.info('Notification for project %r dropped due to rate limiting', project)
                    return

                notification = Notification(event=event, rules=rules)
                self.notify(notification)

            if features.has('projects:digests:store', project):
                key = unsplit_key(self, event.group.project)
                if digests.add(key, event_to_record(event, rules)):
                    deliver_digest.delay(key)

        else:
            notification = Notification(event=event, rules=rules)
            self.notify(notification)
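
All of these examples hinge on the same call shape: features.has() takes a feature flag name, an optional target object (an organization or a project), and an optional actor keyword identifying the user the check is evaluated for. A minimal sketch of that shape (the helper name is ours, not from any of the projects listed here):

from sentry import features

def can_deliver_digests(project, user):
    # True when the configured feature handler enables this flag for
    # the given project/user combination.
    return features.has('projects:digests:deliver', project, actor=user)
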
Code example #2
File: sentry_react.py Project: carriercomm/sentry-1
def get_react_config(context):
    if 'request' in context:
        user = context['request'].user
    else:
        user = None

    if user:
        user = extract_lazy_object(user)

    enabled_features = []
    if features.has('organizations:create', actor=user):
        enabled_features.append('organizations:create')
    if features.has('auth:register', actor=user):
        enabled_features.append('auth:register')

    context = {
        'singleOrganization': settings.SENTRY_SINGLE_ORGANIZATION,
        'urlPrefix': settings.SENTRY_URL_PREFIX,
        'version': _get_version_info(),
        'features': enabled_features,
        'mediaUrl': reverse('sentry-media', args=['sentry', '']),
    }
    if user and user.is_authenticated():
        context.update({
            'isAuthenticated': True,
            'user': serialize(user, user),
        })
    else:
        context.update({
            'isAuthenticated': False,
            'user': None,
        })
    return mark_safe(json.dumps(context))
Code example #3
File: organization.py Project: simudream/sentry
    def serialize(self, obj, attrs, user):
        from sentry import features
        from sentry.app import env
        from sentry.api.serializers.models.team import TeamWithProjectsSerializer

        team_list = list(Team.objects.filter(organization=obj, status=TeamStatus.VISIBLE))

        feature_list = []
        if features.has("organizations:events", obj, actor=user):
            feature_list.append("events")
        if features.has("organizations:sso", obj, actor=user):
            feature_list.append("sso")

        if getattr(obj.flags, "allow_joinleave"):
            feature_list.append("open-membership")

        context = super(DetailedOrganizationSerializer, self).serialize(obj, attrs, user)
        context["quota"] = {
            "maxRate": quotas.get_organization_quota(obj),
            "projectLimit": int(
                OrganizationOption.objects.get_value(organization=obj, key="sentry:project-rate-limit", default=100)
            ),
        }
        context["teams"] = serialize(team_list, user, TeamWithProjectsSerializer())
        if env.request:
            context["access"] = access.from_request(env.request, obj).scopes
        else:
            context["access"] = access.from_user(user, obj).scopes
        context["features"] = feature_list
        context["pendingAccessRequests"] = OrganizationAccessRequest.objects.filter(team__organization=obj).count()
        return context
Code example #4
File: auth_login.py Project: Juraldinio/sentry
    def handle(self, request):
        if request.user.is_authenticated():
            return self.redirect(get_login_redirect(request))

        form = AuthenticationForm(request, request.POST or None, captcha=bool(request.session.get("needs_captcha")))
        if form.is_valid():
            login(request, form.get_user())

            request.session.pop("needs_captcha", None)

            return self.redirect(get_login_redirect(request))

        elif request.POST and not request.session.get("needs_captcha"):
            request.session["needs_captcha"] = 1
            form = AuthenticationForm(request, request.POST or None, captcha=True)
            form.errors.pop("captcha", None)

        request.session.set_test_cookie()

        context = {
            "form": form,
            "next": request.session.get("_next"),
            "CAN_REGISTER": features.has("auth:register") or request.session.get("can_register"),
            "AUTH_PROVIDERS": get_auth_providers(),
            "SOCIAL_AUTH_CREATE_USERS": features.has("social-auth:register"),
        }
        return self.respond("sentry/login.html", context)
Code example #5
File: group_integrations.py Project: Kayle009/sentry
    def get(self, request, group):
        has_issue_basic = features.has('organizations:integrations-issue-basic',
                                       group.organization,
                                       actor=request.user)

        has_issue_sync = features.has('organizations:integrations-issue-sync',
                                      group.organization,
                                      actor=request.user)

        if not (has_issue_basic or has_issue_sync):
            return self.respond([])

        providers = [
            i.key for i in integrations.all()
            if i.has_feature(IntegrationFeatures.ISSUE_BASIC)
            or i.has_feature(IntegrationFeatures.ISSUE_SYNC)
        ]
        return self.paginate(
            queryset=Integration.objects.filter(
                organizations=group.organization,
                provider__in=providers,
            ),
            request=request,
            order_by='name',
            on_results=lambda x: serialize(x, request.user, IntegrationIssueSerializer(group)),
            paginator_cls=OffsetPaginator,
        )
Code example #6
    def get(self, request, organization):
        has_catchall = features.has('organizations:internal-catchall',
                                    organization,
                                    actor=request.user)
        has_github_apps = features.has('organizations:github-apps',
                                       organization,
                                       actor=request.user)

        providers = []
        for provider in integrations.all():
            internal_integrations = {
                i for i in settings.SENTRY_INTERNAL_INTEGRATIONS
                if i != 'github' or not has_github_apps
            }
            if not has_catchall and provider.key in internal_integrations:
                continue

            providers.append(provider)

        providers.sort(key=lambda i: i.key)

        serialized = serialize(
            providers,
            organization=organization,
            serializer=IntegrationProviderSerializer(),
        )

        return Response({'providers': serialized})
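
A small design note on the loop above: internal_integrations does not depend on the loop variable, so it can be computed once before iterating. A behavior-preserving sketch:

internal_integrations = {
    i for i in settings.SENTRY_INTERNAL_INTEGRATIONS
    if i != 'github' or not has_github_apps
}

providers = sorted(
    (
        provider for provider in integrations.all()
        if has_catchall or provider.key not in internal_integrations
    ),
    key=lambda provider: provider.key,
)
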
Code example #7
File: accounts.py Project: andrei1610399/sentry
def register(request):
    from django.conf import settings

    if not (features.has('auth:register') or request.session.get('can_register')):
        return HttpResponseRedirect(reverse('sentry'))

    form = RegistrationForm(request.POST or None,
                            captcha=bool(request.session.get('needs_captcha')))
    if form.is_valid():
        user = form.save()

        # can_register should only allow a single registration
        request.session.pop('can_register', None)

        # HACK: grab whatever the first backend is and assume it works
        user.backend = settings.AUTHENTICATION_BACKENDS[0]

        login_user(request, user)

        request.session.pop('needs_captcha', None)

        return login_redirect(request)

    elif request.POST and not request.session.get('needs_captcha'):
        request.session['needs_captcha'] = 1
        form = RegistrationForm(request.POST or None, captcha=True)
        form.errors.pop('captcha', None)

    return render_to_response('sentry/register.html', {
        'form': form,
        'AUTH_PROVIDERS': get_auth_providers(),
        'SOCIAL_AUTH_CREATE_USERS': features.has('social-auth:register'),
    }, request)
Code example #8
    def handle(self, request, organization):
        try:
            auth_provider = AuthProvider.objects.get(
                organization=organization,
            )
        except AuthProvider.DoesNotExist:
            pass
        else:
            provider = auth_provider.get_provider()
            requires_feature = provider.required_feature

            # The provider's required feature is not enabled. Allow
            # superusers to edit and disable SSO for orgs that downgraded
            # plans and can no longer access the feature.
            if requires_feature and not features.has(
                requires_feature,
                organization,
                actor=request.user
            ) and not is_active_superuser(request):
                home_url = organization.get_url()
                messages.add_message(request, messages.ERROR, ERR_NO_SSO)

                return HttpResponseRedirect(home_url)

            return self.handle_existing_provider(
                request=request,
                organization=organization,
                auth_provider=auth_provider,
            )

        if request.method == 'POST':
            provider_key = request.POST.get('provider')
            if not manager.exists(provider_key):
                raise ValueError(u'Provider not found: {}'.format(provider_key))

            helper = AuthHelper(
                request=request,
                organization=organization,
                provider_key=provider_key,
                flow=AuthHelper.FLOW_SETUP_PROVIDER,
            )

            feature = helper.provider.required_feature
            if feature and not features.has(feature, organization, actor=request.user):
                return HttpResponse('Provider is not enabled', status=401)

            if request.POST.get('init'):
                helper.init_pipeline()

            if not helper.pipeline_is_valid():
                return helper.error('Something unexpected happened during authentication.')

            # render first time setup view
            return helper.current_step()

        # Otherwise user is in bad state since frontend/react should handle this case
        return HttpResponseRedirect(
            organization.get_url()
        )
Code example #9
File: organization.py Project: E-LLP/sentry
    def serialize(self, obj, attrs, user):
        from sentry import features
        from sentry.app import env
        from sentry.api.serializers.models.team import TeamWithProjectsSerializer

        team_list = list(Team.objects.filter(
            organization=obj,
            status=TeamStatus.VISIBLE,
        ))
        for team in team_list:
            team._organization_cache = obj

        onboarding_tasks = list(OrganizationOnboardingTask.objects.filter(
            organization=obj,
        ).select_related('user'))

        feature_list = []
        if features.has('organizations:sso', obj, actor=user):
            feature_list.append('sso')
        if features.has('organizations:callsigns', obj, actor=user):
            feature_list.append('callsigns')
        if features.has('organizations:new-tracebacks', obj, actor=user):
            feature_list.append('new-tracebacks')
        if features.has('organizations:onboarding', obj, actor=user) and \
                not OrganizationOption.objects.filter(organization=obj).exists():
            feature_list.append('onboarding')
        if features.has('organizations:api-keys', obj, actor=user) or \
                ApiKey.objects.filter(organization=obj).exists():
            feature_list.append('api-keys')

        if getattr(obj.flags, 'allow_joinleave'):
            feature_list.append('open-membership')
        if not getattr(obj.flags, 'disable_shared_issues'):
            feature_list.append('shared-issues')

        context = super(DetailedOrganizationSerializer, self).serialize(
            obj, attrs, user)
        context['quota'] = {
            'maxRate': quotas.get_organization_quota(obj),
            'projectLimit': int(OrganizationOption.objects.get_value(
                organization=obj,
                key='sentry:project-rate-limit',
                default=100,
            )),
        }
        context['teams'] = serialize(
            team_list, user, TeamWithProjectsSerializer())
        if env.request:
            context['access'] = access.from_request(env.request, obj).scopes
        else:
            context['access'] = access.from_user(user, obj).scopes
        context['features'] = feature_list
        context['pendingAccessRequests'] = OrganizationAccessRequest.objects.filter(
            team__organization=obj,
        ).count()
        context['onboardingTasks'] = serialize(onboarding_tasks, user, OnboardingTasksSerializer())
        return context
Code example #10
File: sentry_react.py Project: ForkRepo/sentry
def get_react_config(context):
    if 'request' in context:
        user = context['request'].user
        messages = get_messages(context['request'])
        try:
            is_superuser = context['request'].is_superuser()
        except AttributeError:
            is_superuser = False
    else:
        user = None
        messages = []
        is_superuser = False

    if user:
        user = extract_lazy_object(user)

    enabled_features = []
    if features.has('organizations:create', actor=user):
        enabled_features.append('organizations:create')
    if features.has('auth:register', actor=user):
        enabled_features.append('auth:register')

    version_info = _get_version_info()

    needs_upgrade = False

    if is_superuser:
        needs_upgrade = _needs_upgrade()

    context = {
        'singleOrganization': settings.SENTRY_SINGLE_ORGANIZATION,
        'supportEmail': get_support_mail(),
        'urlPrefix': options.get('system.url-prefix'),
        'version': version_info,
        'features': enabled_features,
        'mediaUrl': get_asset_url('sentry', ''),
        'needsUpgrade': needs_upgrade,
        'dsn': _get_public_dsn(),
        'statuspage': _get_statuspage(),
        'messages': [{
            'message': msg.message,
            'level': msg.tags,
        } for msg in messages],
        'isOnPremise': settings.SENTRY_ONPREMISE,
    }
    if user and user.is_authenticated():
        context.update({
            'isAuthenticated': True,
            'user': serialize(user, user),
        })
        context['user']['isSuperuser'] = is_superuser
    else:
        context.update({
            'isAuthenticated': False,
            'user': None,
        })
    return json.dumps_htmlsafe(context)
Code example #11
    def _has_issue_feature(self, organization, user):
        has_issue_basic = features.has('organizations:integrations-issue-basic',
                                       organization,
                                       actor=user)

        has_issue_sync = features.has('organizations:integrations-issue-sync',
                                      organization,
                                      actor=user)

        return has_issue_sync or has_issue_basic
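
The same OR-combination of two flags can also be written over a tuple with any(); a sketch (not from the source project):

from sentry import features

def has_issue_feature(organization, user):
    # Enabled if either integrations-issue flag is on for the org.
    return any(
        features.has(flag, organization, actor=user)
        for flag in (
            'organizations:integrations-issue-basic',
            'organizations:integrations-issue-sync',
        )
    )
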
Code example #12
    def handle(self, request, organization):
        if not features.has('organizations:sso-basic', organization, actor=request.user):
            messages.add_message(
                request,
                messages.ERROR,
                ERR_NO_SSO,
            )
            return HttpResponseRedirect(
                reverse('sentry-organization-home', args=[organization.slug])
            )

        try:
            auth_provider = AuthProvider.objects.get(
                organization=organization,
            )
        except AuthProvider.DoesNotExist:
            pass
        else:
            return self.handle_existing_provider(
                request=request,
                organization=organization,
                auth_provider=auth_provider,
            )

        if request.method == 'POST':
            provider_key = request.POST.get('provider')
            if not manager.exists(provider_key):
                raise ValueError(u'Provider not found: {}'.format(provider_key))

            helper = AuthHelper(
                request=request,
                organization=organization,
                provider_key=provider_key,
                flow=AuthHelper.FLOW_SETUP_PROVIDER,
            )

            feature = helper.provider.required_feature
            if feature and not features.has(feature, organization, actor=request.user):
                return HttpResponse('Provider is not enabled', status=401)

            if request.POST.get('init'):
                helper.init_pipeline()

            if not helper.pipeline_is_valid():
                return helper.error('Something unexpected happened during authentication.')

            # render first time setup view
            return helper.current_step()

        # Otherwise user is in bad state since frontend/react should handle this case
        return HttpResponseRedirect(
            reverse('sentry-organization-home', args=[organization.slug])
        )
Code example #13
    def get(self, request, installation):
        if not features.has('organizations:internal-catchall',
                            installation.organization,
                            actor=request.user):
            return Response(status=404)

        return Response(serialize(installation))
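
Gates like this are straightforward to exercise in tests by stubbing the flag check. A hedged sketch (the client and installation_url fixtures are hypothetical; only sentry.features.has is real):

from unittest import mock

def test_internal_catchall_disabled(client, installation_url):
    # With the flag stubbed to False, the endpoint should take the
    # 404 path shown above.
    with mock.patch('sentry.features.has', return_value=False):
        assert client.get(installation_url).status_code == 404
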
Code example #14
File: organization.py Project: noah-lee/sentry
    def serialize(self, obj, attrs, user):
        from sentry import features
        from sentry.api.serializers.models.team import TeamWithProjectsSerializer

        team_list = list(Team.objects.filter(
            organization=obj,
            status=TeamStatus.VISIBLE,
        ))

        feature_list = []
        if features.has('organizations:sso', obj, actor=user):
            feature_list.append('sso')

        if getattr(obj.flags, 'allow_joinleave'):
            feature_list.append('open-membership')

        context = super(DetailedOrganizationSerializer, self).serialize(
            obj, attrs, user)
        context['teams'] = serialize(
            team_list, user, TeamWithProjectsSerializer())
        context['access'] = access.from_user(user, obj).scopes
        context['features'] = feature_list
        context['pendingAccessRequests'] = OrganizationAccessRequest.objects.filter(
            team__organization=obj,
        ).count()
        return context
Code example #15
    def handle_basic_auth(self, request, organization):
        can_register = features.has('auth:register') or request.session.get('can_register')

        op = request.POST.get('op')
        login_form = self.get_login_form(request)
        if can_register:
            register_form = self.get_register_form(request)
        else:
            register_form = None

        if can_register and register_form.is_valid():
            user = register_form.save()

            defaults = {
                'has_global_access': True,
                'type': OrganizationMemberType.MEMBER,
            }

            organization.member_set.create(
                user=user,
                **defaults
            )

            # HACK: grab whatever the first backend is and assume it works
            user.backend = settings.AUTHENTICATION_BACKENDS[0]

            login(request, user)

            # can_register should only allow a single registration
            request.session.pop('can_register', None)

            request.session.pop('needs_captcha', None)

            return self.redirect(get_login_redirect(request))

        elif login_form.is_valid():
            login(request, login_form.get_user())

            request.session.pop('needs_captcha', None)

            return self.redirect(get_login_redirect(request))

        elif request.POST and not request.session.get('needs_captcha'):
            request.session['needs_captcha'] = 1
            login_form = self.get_login_form(request)
            login_form.errors.pop('captcha', None)
            if can_register:
                register_form = self.get_register_form(request)
                register_form.errors.pop('captcha', None)

        request.session.set_test_cookie()

        context = {
            'op': op or 'login',
            'login_form': login_form,
            'register_form': register_form,
            'organization': organization,
            'CAN_REGISTER': can_register,
        }
        return self.respond('sentry/organization-login.html', context)
Code example #16
    def handle_basic_auth(self, request, organization):
        form = SimplifiedAuthenticationForm(
            request, request.POST or None,
            captcha=bool(request.session.get('needs_captcha')),
        )

        if form.is_valid():
            login(request, form.get_user())

            request.session.pop('needs_captcha', None)

            return self.redirect(get_login_redirect(request))

        elif request.POST and not request.session.get('needs_captcha'):
            request.session['needs_captcha'] = 1
            form = AuthenticationForm(request, request.POST or None, captcha=True)
            form.errors.pop('captcha', None)

        context = {
            'form': form,
            'CAN_REGISTER': features.has('auth:register') or request.session.get('can_register'),
            'organization': organization,
        }

        return self.respond('sentry/organization-login.html', context)
Code example #17
File: remove_project.py Project: hosmelq/sentry
    def handle(self, request, organization, project):
        form = self.get_form(request)

        if form.is_valid():
            client.delete(
                '/projects/{}/{}/'.format(organization.slug, project.slug),
                request=request,
                is_sudo=True
            )

            has_new_teams = features.has(
                'organizations:new-teams',
                organization,
                actor=request.user,
            )
            project_name = (project.slug if has_new_teams else project.name).encode('utf-8')
            messages.add_message(
                request, messages.SUCCESS,
                _(u'The project %r was scheduled for deletion.') % (project_name, )
            )

            return HttpResponseRedirect(
                reverse('sentry-organization-home', args=[organization.slug])
            )

        context = {
            'form': form,
        }

        return self.respond('sentry/projects/remove.html', context)
Code example #18
File: quotas.py Project: DZTPY/sentry
def manage_project_quotas(request, organization, project):
    if not features.has('projects:quotas', project, actor=request.user):
        messages.add_message(
            request, messages.ERROR,
            ERR_NO_SSO,
        )
        redirect = reverse('sentry-manage-project',
                           args=[organization.slug, project.slug])
        return HttpResponseRedirect(redirect)

    form = ProjectQuotasForm(project, request.POST or None)

    if form and form.is_valid():
        form.save()

        messages.add_message(
            request, messages.SUCCESS,
            _('Your settings were saved successfully.'))

        return HttpResponseRedirect(reverse('sentry-manage-project-quotas', args=[project.organization.slug, project.slug]))

    context = {
        'organization': organization,
        'team': project.team,
        'page': 'quotas',
        # TODO(dcramer): has_quotas is an awful hack
        'has_quotas': type(app.quotas) != Quota,
        'system_quota': int(app.quotas.get_system_quota()),
        'team_quota': int(app.quotas.get_team_quota(project.team)),
        'project': project,
        'form': form,
    }
    return render_to_response('sentry/projects/quotas.html', context, request)
Code example #19
File: project.py Project: PegasusWang/sentry
    def serialize(self, obj, attrs, user):
        from sentry import features

        feature_list = []
        for feature in ('csp', 'event-types', 'global-events', 'user-reports', 'dsym'):
            if features.has('projects:' + feature, obj, actor=user):
                feature_list.append(feature)

        status_label = STATUS_LABELS.get(obj.status, 'unknown')

        return {
            'id': str(obj.id),
            'slug': obj.slug,
            'name': obj.name,
            'isPublic': obj.public,
            'isBookmarked': attrs['is_bookmarked'],
            'callSign': obj.callsign,
            'color': obj.color,
            # TODO(mitsuhiko): eventually remove this once we treat
            # all short names as reviewed.
            'callSignReviewed': bool(attrs['reviewed-callsign']),
            'dateCreated': obj.date_added,
            'firstEvent': obj.first_event,
            'features': feature_list,
            'status': status_label,
        }
Code example #20
File: integrations.py Project: mjumbewu/sentry
def sync_assignee_outbound(external_issue_id, user_id, assign, **kwargs):
    # sync Sentry assignee to an external issue
    external_issue = ExternalIssue.objects.get(id=external_issue_id)

    organization = Organization.objects.get(id=external_issue.organization_id)
    has_issue_sync = features.has('organizations:integrations-issue-sync',
                                  organization)

    if not has_issue_sync:
        return

    integration = Integration.objects.get(id=external_issue.integration_id)
    # assume unassign if None
    if user_id is None:
        user = None
    else:
        user = User.objects.get(id=user_id)

    installation = integration.get_installation(
        organization_id=external_issue.organization_id,
    )
    if installation.should_sync('outbound_assignee'):
        installation.sync_assignee_outbound(external_issue, user, assign=assign)
        analytics.record(
            'integration.issue.assignee.synced',
            provider=integration.provider,
            id=integration.id,
            organization_id=external_issue.organization_id,
        )
Code example #21
    def get_description(self):
        data = self.activity.data

        if data.get('version'):
            # The release URL is only meaningful when the activity data
            # carries a version.
            if features.has('organizations:sentry10', self.organization):
                url = u'/organizations/{}/releases/{}/?project={}'.format(
                    self.organization.slug,
                    data['version'],
                    self.project.id,
                )
            else:
                url = u'/{}/{}/releases/{}/'.format(
                    self.organization.slug,
                    self.project.slug,
                    data['version'],
                )

            return u'{author} marked {an issue} as resolved in {version}', {
                'version': data['version'],
            }, {
                'version': u'<a href="{}">{}</a>'.format(
                    absolute_uri(url),
                    escape(data['version']),
                ),
            }

        return u'{author} marked {an issue} as resolved in an upcoming release'
Code example #22
    def handle(self, request, organization):
        if not features.has('organizations:sso', organization, actor=request.user):
            messages.add_message(
                request, messages.ERROR,
                ERR_NO_SSO,
            )
            return HttpResponseRedirect(reverse('sentry-organization-home', args=[organization.slug]))

        try:
            auth_provider = AuthProvider.objects.get(
                organization=organization,
            )
        except AuthProvider.DoesNotExist:
            pass
        else:
            return self.handle_existing_provider(
                request=request,
                organization=organization,
                auth_provider=auth_provider,
            )

        if request.method == 'POST':
            provider_key = request.POST.get('provider')
            if not manager.exists(provider_key):
                raise ValueError('Provider not found: {}'.format(provider_key))

            # render first time setup view
            return self.handle_provider_setup(request, organization, provider_key)

        context = {
            'provider_list': [(k, v.name) for k, v in manager],
        }

        return self.respond('sentry/organization-auth-settings.html', context)
Code example #23
File: plugin.py Project: getsentry/sentry
    def get_group_data(self, group, event, triggering_rules):
        data = {
            'id': six.text_type(group.id),
            'project': group.project.slug,
            'project_name': group.project.name,
            'project_slug': group.project.slug,
            'logger': event.get_tag('logger'),
            'level': event.get_tag('level'),
            'culprit': group.culprit,
            'message': event.real_message,
            'url': group.get_absolute_url(params={'referrer': 'webhooks_plugin'}),
            'triggering_rules': triggering_rules,
        }
        data['event'] = dict(event.data or {})
        data['event']['tags'] = event.tags
        data['event']['event_id'] = event.event_id
        if features.has('organizations:legacy-event-id', group.project.organization):
            try:
                data['event']['id'] = Event.objects.filter(
                    project_id=event.project_id,
                    event_id=event.event_id,
                ).values_list('id', flat=True).get()
            except Event.DoesNotExist:
                data['event']['id'] = None
        return data
Code example #24
    def post(self, request, organization):
        """
        Create a saved query
        """
        if not features.has('organizations:discover', organization, actor=request.user):
            return self.respond(status=404)

        serializer = DiscoverSavedQuerySerializer(data=request.DATA, context={
            'organization': organization,
        })

        if not serializer.is_valid():
            return Response(serializer.errors, status=400)

        data = serializer.object

        model = DiscoverSavedQuery.objects.create(
            organization=organization,
            name=data['name'],
            query=data['query'],
            created_by=request.user if request.user.is_authenticated() else None,
        )

        model.set_projects(data['project_ids'])

        return Response(serialize(model), status=201)
Code example #25
File: integrations.py Project: mjumbewu/sentry
def sync_status_outbound(group_id, external_issue_id, **kwargs):
    try:
        group = Group.objects.filter(
            id=group_id,
            status__in=[GroupStatus.UNRESOLVED, GroupStatus.RESOLVED],
        )[0]
    except IndexError:
        return

    has_issue_sync = features.has('organizations:integrations-issue-sync',
                                  group.organization)
    if not has_issue_sync:
        return

    external_issue = ExternalIssue.objects.get(id=external_issue_id)
    integration = Integration.objects.get(id=external_issue.integration_id)
    installation = integration.get_installation(
        organization_id=external_issue.organization_id,
    )
    if installation.should_sync('outbound_status'):
        installation.sync_status_outbound(
            external_issue, group.status == GroupStatus.RESOLVED, group.project_id
        )
        analytics.record(
            'integration.issue.status.synced',
            provider=integration.provider,
            id=integration.id,
            organization_id=external_issue.organization_id,
        )
Code example #26
File: project_settings.py Project: berny9015/sentry
    def __init__(self, request, organization, team_list, data, instance, *args, **kwargs):
        # First, we need to check for the value overrides from the Organization options
        # We need to do this before `initial` gets passed into the Form.
        disabled = []
        if 'initial' in kwargs:
            for opt in self.org_overrides:
                value = bool(organization.get_option('sentry:require_%s' % (opt,), False))
                if value:
                    disabled.append(opt)
                    kwargs['initial'][opt] = value

        super(EditProjectForm, self).__init__(data=data, instance=instance, *args, **kwargs)

        self.organization = organization
        self.team_list = team_list

        self.fields['team'].choices = self.get_team_choices(team_list, instance.team)
        self.fields['team'].widget.choices = self.fields['team'].choices

        if not features.has('organizations:callsigns', organization, actor=request.user):
            del self.fields['callsign']

        # After the Form is initialized, we now need to disable the fields that have been
        # overridden from Organization options.
        for opt in disabled:
            self.fields[opt].widget.attrs['disabled'] = 'disabled'
Code example #27
    def get(self, request, organization):
        """
        Retrieve details about Organization's SSO settings and
        currently installed auth_provider
        ``````````````````````````````````````````````````````

        :pparam string organization_slug: the organization short name
        :auth: required
        """
        if not features.has('organizations:sso', organization, actor=request.user):
            return Response(ERR_NO_SSO, status=status.HTTP_403_FORBIDDEN)

        try:
            auth_provider = AuthProvider.objects.get(
                organization=organization,
            )
        except AuthProvider.DoesNotExist:
            # This is a valid state: the org does not have an auth provider
            # configured, so make sure we respond with a 2xx
            return Response(status=status.HTTP_204_NO_CONTENT)

        # cache organization so that we don't need to query for org when serializing
        auth_provider._organization_cache = organization

        return Response(serialize(auth_provider, request.user))
Code example #28
File: sentry_app_details.py Project: Kayle009/sentry
    def put(self, request, sentry_app):
        if not features.has('organizations:internal-catchall',
                            sentry_app.owner,
                            actor=request.user):

            return Response(status=404)

        serializer = SentryAppSerializer(data=request.DATA, partial=True)

        if serializer.is_valid():
            result = serializer.object

            updated_app = Updater.run(
                sentry_app=sentry_app,
                name=result.get('name'),
                webhook_url=result.get('webhookUrl'),
                redirect_url=result.get('redirectUrl'),
                is_alertable=result.get('isAlertable'),
                scopes=result.get('scopes'),
                events=result.get('events'),
                overview=result.get('overview'),
            )

            return Response(serialize(updated_app, request.user))

        return Response(serializer.errors, status=400)
Code example #29
File: project.py Project: duanshuaimin/sentry
    def serialize(self, obj, attrs, user):
        from sentry import features

        feature_list = []
        for feature in ('global-events', 'data-forwarding', 'rate-limits'):
            if features.has('projects:' + feature, obj, actor=user):
                feature_list.append(feature)

        if obj.flags.has_releases:
            feature_list.append('releases')

        status_label = STATUS_LABELS.get(obj.status, 'unknown')

        return {
            'id': six.text_type(obj.id),
            'slug': obj.slug,
            'name': obj.name,
            'isPublic': obj.public,
            'isBookmarked': attrs['is_bookmarked'],
            'callSign': obj.callsign,
            'color': obj.color,
            'dateCreated': obj.date_added,
            'firstEvent': obj.first_event,
            'features': feature_list,
            'status': status_label,
        }
Code example #30
File: project.py Project: NuttasitBoonwat/sentry
    def serialize(self, obj, attrs, user):
        from sentry import features

        feature_list = []
        for feature in (
            'global-events', 'data-forwarding', 'rate-limits', 'custom-filters', 'similarity-view',
            'custom-inbound-filters', 'minidump',
        ):
            if features.has('projects:' + feature, obj, actor=user):
                feature_list.append(feature)

        if obj.flags.has_releases:
            feature_list.append('releases')

        status_label = STATUS_LABELS.get(obj.status, 'unknown')

        context = {
            'id': six.text_type(obj.id),
            'slug': obj.slug,
            'name': obj.name,
            'isPublic': obj.public,
            'isBookmarked': attrs['is_bookmarked'],
            'callSign': obj.callsign,
            'color': obj.color,
            'dateCreated': obj.date_added,
            'firstEvent': obj.first_event,
            'features': feature_list,
            'status': status_label,
            'platform': obj.platform,
        }
        if 'stats' in attrs:
            context['stats'] = attrs['stats']
        return context
Code example #31
File: discover_query.py Project: zttzxh/sentry
    def has_feature(self, request, organization):
        return features.has(
            "organizations:discover", organization, actor=request.user
        ) or features.has(
            "organizations:discover-basic", organization, actor=request.user
        )
Code example #32
    def has_discover_snql(self, organization: Organization,
                          request: Request) -> bool:
        return features.has(
            "organizations:discover-use-snql",
            organization,
            actor=request.user,
        )
Code example #33
    def post(self, request, project, **kwargs):
        # Minidump request payloads do not have the same structure as
        # usual events from other SDKs. Most notably, the event needs
        # to be transferred in the `sentry` form field. All other form
        # fields are treated as "extra" information. The only exception
        # to this is `upload_file_minidump`, which contains the minidump.

        if any(key.startswith('sentry[') for key in request.POST):
            # First, try to parse the nested form syntax `sentry[key][key]`
            # This is required for the Breakpad client library, which only
            # supports string values of up to 64 characters.
            extra = parser.parse(request.POST.urlencode())
            data = extra.pop('sentry', {})
        else:
            # Custom clients can submit longer payloads and should JSON
            # encode event data into the optional `sentry` field.
            extra = request.POST
            json_data = extra.pop('sentry', None)
            data = json.loads(json_data[0]) if json_data else {}

        # Merge additional form fields from the request with `extra`
        # data from the event payload and set defaults for processing.
        extra.update(data.get('extra', {}))
        data['extra'] = extra

        # Assign our own UUID so we can track this minidump. We cannot trust the
        # uploaded filename, and if reading the minidump fails there is no way
        # we can ever retrieve the original UUID from the minidump.
        event_id = data.get('event_id') or uuid.uuid4().hex
        data['event_id'] = event_id

        # At this point, we only extract the bare minimum information
        # needed to continue processing. This requires to process the
        # minidump without symbols and CFI to obtain an initial stack
        # trace (most likely via stack scanning). If all validations
        # pass, the event will be inserted into the database.
        try:
            minidump = request.FILES['upload_file_minidump']
        except KeyError:
            raise APIError('Missing minidump upload')

        # Breakpad on linux sometimes stores the entire HTTP request body as
        # dump file instead of just the minidump. The Electron SDK then for
        # example uploads a multipart formdata body inside the minidump file.
        # It needs to be re-parsed, to extract the actual minidump before
        # continuing.
        minidump.seek(0)
        if minidump.read(2) == b'--':
            # We have already read the leading two bytes ('--'); the rest
            # of the first line is the form boundary.
            boundary = minidump.readline().rstrip()
            minidump.seek(0)

            # Next, we have to fake an HTTP request by specifying the form
            # boundary and the content length; otherwise Django will not try
            # to parse our form body. Also, we need to supply new upload
            # handlers since they cannot be reused from the current request.
            meta = {
                'CONTENT_TYPE': b'multipart/form-data; boundary=%s' % boundary,
                'CONTENT_LENGTH': minidump.size,
            }
            handlers = [
                uploadhandler.load_handler(handler, request)
                for handler in settings.FILE_UPLOAD_HANDLERS
            ]

            _, files = MultiPartParser(meta, minidump, handlers).parse()
            try:
                minidump = files['upload_file_minidump']
            except KeyError:
                raise APIError('Missing minidump upload')

        if minidump.size == 0:
            raise APIError('Empty minidump upload received')

        if settings.SENTRY_MINIDUMP_CACHE:
            if not os.path.exists(settings.SENTRY_MINIDUMP_PATH):
                os.mkdir(settings.SENTRY_MINIDUMP_PATH, 0o744)

            with open('%s/%s.dmp' % (settings.SENTRY_MINIDUMP_PATH, event_id),
                      'wb') as out:
                for chunk in minidump.chunks():
                    out.write(chunk)

        # Always store the minidump in attachments so we can access it during
        # processing, regardless of the event-attachments feature. This will
        # allow us to stack walk again with CFI once symbols are loaded.
        attachments = []
        minidump.seek(0)
        attachments.append(
            CachedAttachment.from_upload(minidump,
                                         type=MINIDUMP_ATTACHMENT_TYPE))

        # Append all other files as generic attachments. We can skip this if the
        # feature is disabled since they won't be saved.
        if features.has('organizations:event-attachments',
                        project.organization,
                        actor=request.user):
            for name, file in six.iteritems(request.FILES):
                if name != 'upload_file_minidump':
                    attachments.append(CachedAttachment.from_upload(file))

        try:
            state = process_minidump(minidump)
            merge_process_state_event(data, state)
        except ProcessMinidumpError as e:
            minidumps_logger.exception(e)
            raise APIError(e.message.split('\n', 1)[0])

        response_or_event_id = self.process(request,
                                            attachments=attachments,
                                            data=data,
                                            project=project,
                                            **kwargs)

        if isinstance(response_or_event_id, HttpResponse):
            return response_or_event_id

        # Return the formatted UUID of the generated event. This is
        # expected by the Electron http uploader on Linux and doesn't
        # break the default Breakpad client library.
        return HttpResponse(six.text_type(uuid.UUID(response_or_event_id)),
                            content_type='text/plain')
Code example #34
File: sentry_apps.py Project: zl0422/sentry
    def post(self, request, organization):
        data = {
            "name": request.json_body.get("name"),
            "user": request.user,
            "author": request.json_body.get("author"),
            "organization": organization,
            "webhookUrl": request.json_body.get("webhookUrl"),
            "redirectUrl": request.json_body.get("redirectUrl"),
            "isAlertable": request.json_body.get("isAlertable"),
            "isInternal": request.json_body.get("isInternal"),
            "verifyInstall": request.json_body.get("verifyInstall"),
            "scopes": request.json_body.get("scopes", []),
            "events": request.json_body.get("events", []),
            "schema": request.json_body.get("schema", {}),
            "overview": request.json_body.get("overview"),
            "allowedOrigins": request.json_body.get("allowedOrigins", []),
        }

        if self._has_hook_events(request) and not features.has(
            "organizations:integrations-event-hooks", organization, actor=request.user
        ):

            return Response(
                {
                    "non_field_errors": [
                        "Your organization does not have access to the 'error' resource subscription."
                    ]
                },
                status=403,
            )

        serializer = SentryAppSerializer(data=data, access=request.access)

        if serializer.is_valid():
            data["redirect_url"] = data["redirectUrl"]
            data["webhook_url"] = data["webhookUrl"]
            data["is_alertable"] = data["isAlertable"]
            data["verify_install"] = data["verifyInstall"]
            data["allowed_origins"] = data["allowedOrigins"]
            data["is_internal"] = data.get("isInternal")

            creator = InternalCreator if data.get("isInternal") else Creator
            try:
                sentry_app = creator.run(request=request, **data)
            except ValidationError as e:
                # we generate and validate the slug here instead of the serializer since the slug never changes
                return Response(e.detail, status=400)

            return Response(serialize(sentry_app, access=request.access), status=201)

        # log any errors with schema
        if "schema" in serializer.errors:
            for error_message in serializer.errors["schema"]:
                name = "sentry_app.schema_validation_error"
                log_info = {
                    "schema": json.dumps(data["schema"]),
                    "user_id": request.user.id,
                    "sentry_app_name": data["name"],
                    "organization_id": organization.id,
                    "error_message": error_message,
                }
                logger.info(name, extra=log_info)
                analytics.record(name, **log_info)
        return Response(serializer.errors, status=400)
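
A sketch (not the source's approach) of the same camelCase-to-snake_case copying expressed as a mapping, which keeps the two key sets in one place; data is the dict built at the top of the example:

CAMEL_TO_SNAKE = {
    'redirectUrl': 'redirect_url',
    'webhookUrl': 'webhook_url',
    'isAlertable': 'is_alertable',
    'verifyInstall': 'verify_install',
    'allowedOrigins': 'allowed_origins',
    'isInternal': 'is_internal',
}

for camel, snake in CAMEL_TO_SNAKE.items():
    data[snake] = data.get(camel)
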
Code example #35
    def post(self, request):
        """
        Create a New Organization
        `````````````````````````

        Create a new organization owned by the request's user.  To create
        an organization only the name is required.

        :param string name: the human readable name for the new organization.
        :param string slug: the unique URL slug for this organization.  If
                            this is not provided a slug is automatically
                            generated based on the name.
        :param bool agreeTerms: a boolean signaling you agree to the applicable
                                terms of service and privacy policy.
        :auth: required, user-context-needed
        """
        if not request.user.is_authenticated():
            return Response({'detail': 'This endpoint requires user info'}, status=401)

        if not features.has('organizations:create', actor=request.user):
            return Response(
                {
                    'detail': 'Organizations are not allowed to be created by this user.'
                }, status=401
            )

        limit = options.get('api.rate-limit.org-create')
        if limit and ratelimiter.is_limited(
            u'org-create:{}'.format(request.user.id),
            limit=limit,
            window=3600,
        ):
            return Response(
                {
                    'detail': 'You are attempting to create too many organizations too quickly.'
                },
                status=429
            )

        serializer = OrganizationSerializer(data=request.DATA)

        if serializer.is_valid():
            result = serializer.object

            try:
                with transaction.atomic():
                    org = Organization.objects.create(
                        name=result['name'],
                        slug=result.get('slug'),
                    )

                    om = OrganizationMember.objects.create(
                        organization=org,
                        user=request.user,
                        role=roles.get_top_dog().id,
                    )

                    if result.get('defaultTeam'):
                        team = org.team_set.create(
                            name=org.name,
                        )

                        OrganizationMemberTeam.objects.create(
                            team=team, organizationmember=om, is_active=True
                        )

                    self.create_audit_entry(
                        request=request,
                        organization=org,
                        target_object=org.id,
                        event=AuditLogEntryEvent.ORG_ADD,
                        data=org.get_audit_log_data(),
                    )

                    analytics.record(
                        'organization.created',
                        org,
                        actor_id=request.user.id if request.user.is_authenticated() else None
                    )

            except IntegrityError:
                return Response(
                    {
                        'detail': 'An organization with this slug already exists.'
                    },
                    status=409,
                )

            # failure on sending this signal is acceptable
            if result.get('agreeTerms'):
                terms_accepted.send_robust(
                    user=request.user,
                    organization=org,
                    ip_address=request.META['REMOTE_ADDR'],
                    sender=type(self),
                )

            return Response(serialize(org, request.user), status=201)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
Code example #36
File: event_manager.py Project: sontek/sentry
    def _save_aggregate(self, event, hashes, release, **kwargs):
        project = event.project

        # attempt to find a matching hash
        all_hashes = self._find_hashes(project, hashes)

        existing_group_id = None
        for h in all_hashes:
            if h.group_id is not None:
                existing_group_id = h.group_id
                break
            if h.group_tombstone_id is not None:
                raise HashDiscarded('Matches group tombstone %s' %
                                    h.group_tombstone_id)

        # XXX(dcramer): this has the opportunity to create duplicate groups
        # it should be resolved by the hash merging function later but this
        # should be better tested/reviewed
        if existing_group_id is None:
            kwargs['score'] = ScoreClause.calculate(1, kwargs['last_seen'])
            # it's possible the release was deleted between
            # when we queried for the release and now, so
            # make sure it still exists
            first_release = kwargs.pop('first_release', None)

            with transaction.atomic():
                short_id = project.next_short_id()
                first_release_id = Release.objects.filter(
                    id=first_release.id,
                ).values_list('id', flat=True).first() if first_release else None
                group = Group.objects.create(
                    project=project,
                    short_id=short_id,
                    first_release_id=first_release_id,
                    **kwargs
                )
                group_is_new = True

            metrics.incr(
                'group.created',
                skip_internal=True,
                tags={'platform': event.platform or 'unknown'},
            )

        else:
            group = Group.objects.get(id=existing_group_id)

            group_is_new = False

        # Keep a set of all of the hashes that are relevant for this event and
        # belong to the destination group so that we can record this as the
        # last processed event for each. (We can't just update every
        # ``GroupHash`` instance, since we only want to record this for events
        # that not only include the hash but were also placed into the
        # associated group.)
        relevant_group_hashes = {
            instance for instance in all_hashes
            if instance.group_id == group.id
        }

        # If all hashes are brand new we treat this event as new
        is_new = False
        new_hashes = [h for h in all_hashes if h.group_id is None]
        if new_hashes:
            # XXX: There is a race condition here wherein another process could
            # create a new group that is associated with one of the new hashes,
            # add some event(s) to it, and then subsequently have the hash
            # "stolen" by this process. This then "orphans" those events from
            # their "siblings" in the group we've created here. We don't have a
            # way to fix this, since we can't call `_ensure_hashes_merged`
            # without filtering on `group_id` (which we can't do due to query
            # planner weirdness.) For more context, see 84c6f75a and d0e22787,
            # as well as GH-5085.
            GroupHash.objects.filter(
                id__in=[h.id for h in new_hashes],
            ).exclude(
                state=GroupHash.State.LOCKED_IN_MIGRATION,
            ).update(group=group)

            if group_is_new and len(new_hashes) == len(all_hashes):
                is_new = True

            # XXX: This can lead to invalid results due to a race condition and
            # lack of referential integrity enforcement, see above comment(s)
            # about "hash stealing".
            relevant_group_hashes.update(new_hashes)

        # XXX(dcramer): it's important this gets called **before** the aggregate
        # is processed as otherwise values like last_seen will get mutated
        can_sample = (features.has('projects:sample-events', project=project)
                      and should_sample(
                          event.data.get('received')
                          or float(event.datetime.strftime('%s')),
                          group.data.get('last_received')
                          or float(group.last_seen.strftime('%s')),
                          group.times_seen,
                      ))

        if not is_new:
            is_regression = self._process_existing_aggregate(
                group=group,
                event=event,
                data=kwargs,
                release=release,
            )
        else:
            is_regression = False

        # Determine if we've sampled enough data to store this event
        if is_new or is_regression:
            is_sample = False
        else:
            is_sample = can_sample

        if not is_sample:
            GroupHash.record_last_processed_event_id(
                project.id,
                [h.id for h in relevant_group_hashes],
                event.event_id,
            )

        return group, is_new, is_regression, is_sample
Code example #37
File: store.py Project: wux6533/sentry
def _do_save_event(cache_key=None,
                   data=None,
                   start_time=None,
                   event_id=None,
                   project_id=None,
                   **kwargs):
    """
    Saves an event to the database.
    """

    from sentry.event_manager import HashDiscarded, EventManager
    from sentry import quotas
    from sentry.models import ProjectKey
    from sentry.utils.outcomes import Outcome, track_outcome
    from sentry.ingest.outcomes_consumer import mark_signal_sent

    event_type = "none"

    if cache_key and data is None:
        with metrics.timer(
                "tasks.store.do_save_event.get_cache") as metric_tags:
            data = default_cache.get(cache_key)
            if data is not None:
                metric_tags["event_type"] = event_type = data.get(
                    "type") or "none"

    with metrics.global_tags(event_type=event_type):
        if data is not None:
            data = CanonicalKeyDict(data)

        if event_id is None and data is not None:
            event_id = data["event_id"]

        # Only when we come from reprocessing do we get a project_id sent
        # into the task.
        if project_id is None:
            project_id = data.pop("project")

        key_id = None if data is None else data.get("key_id")
        if key_id is not None:
            key_id = int(key_id)
        timestamp = to_datetime(start_time) if start_time is not None else None

        # We only need to delete raw events for events that support
        # reprocessing.  If the data cannot be found we want to assume
        # that we need to delete the raw event.
        if not data or reprocessing.event_supports_reprocessing(data):
            with metrics.timer("tasks.store.do_save_event.delete_raw_event"):
                delete_raw_event(project_id, event_id, allow_hint_clear=True)

        # This covers two cases: where data is None because we did not manage
        # to fetch it from the default cache or the empty dictionary was
        # stored in the default cache.  The former happens if the event
        # expired while being on the queue, the second happens on reprocessing
        # if the raw event was deleted concurrently while we held on to
        # it.  This causes the node store to delete the data and we end up
        # fetching an empty dict.  We could in theory not invoke `save_event`
        # in those cases but it's important that we always clean up the
        # reprocessing reports correctly or they will screw up the UI.  So
        # to future proof this correctly we just handle this case here.
        if not data:
            metrics.incr("events.failed",
                         tags={
                             "reason": "cache",
                             "stage": "post"
                         },
                         skip_internal=False)
            return

        with configure_scope() as scope:
            scope.set_tag("project", project_id)

        event = None
        try:
            with metrics.timer("tasks.store.do_save_event.event_manager.save"):
                manager = EventManager(data)
                # event.project.organization is populated after this statement.
                event = manager.save(project_id,
                                     assume_normalized=True,
                                     cache_key=cache_key)

            with metrics.timer("tasks.store.do_save_event.track_outcome"):
                # This is where we can finally say that we have accepted the event.
                track_outcome(
                    event.project.organization_id,
                    event.project.id,
                    key_id,
                    Outcome.ACCEPTED,
                    None,
                    timestamp,
                    event_id,
                )

        except HashDiscarded:
            project = Project.objects.get_from_cache(id=project_id)
            reason = FilterStatKeys.DISCARDED_HASH
            project_key = None
            try:
                if key_id is not None:
                    project_key = ProjectKey.objects.get_from_cache(id=key_id)
            except ProjectKey.DoesNotExist:
                pass

            quotas.refund(project, key=project_key, timestamp=start_time)
            # There is no signal supposed to be sent for this particular
            # outcome-reason combination. Prevent the outcome consumer from
            # emitting it for now.
            #
            # XXX(markus): Revisit decision about signals once outcomes consumer is stable.
            mark_signal_sent(project_id, event_id)
            track_outcome(
                project.organization_id,
                project_id,
                key_id,
                Outcome.FILTERED,
                reason,
                timestamp,
                event_id,
            )

        finally:
            if cache_key:
                with metrics.timer("tasks.store.do_save_event.delete_cache"):
                    default_cache.delete(cache_key)

                with metrics.timer(
                        "tasks.store.do_save_event.delete_attachment_cache"):
                    # In the unlikely case that we did not manage to persist the
                    # event, we always delete the attachment cache key as well.
                    if event is None or features.has(
                            "organizations:event-attachments",
                            event.project.organization,
                            actor=None):
                        attachment_cache.delete(cache_key)

            if start_time:
                metrics.timing("events.time-to-process",
                               time() - start_time,
                               instance=data["platform"])
Code Example #38
File: store.py Project: wux6533/sentry
def _do_process_event(cache_key,
                      start_time,
                      event_id,
                      process_task,
                      data=None):
    from sentry.plugins.base import plugins

    if data is None:
        data = default_cache.get(cache_key)

    if data is None:
        metrics.incr("events.failed",
                     tags={
                         "reason": "cache",
                         "stage": "process"
                     },
                     skip_internal=False)
        error_logger.error("process.failed.empty",
                           extra={"cache_key": cache_key})
        return

    data = CanonicalKeyDict(data)

    project_id = data["project"]
    event_id = data["event_id"]

    project = Project.objects.get_from_cache(id=project_id)

    with_datascrubbing = features.has("organizations:datascrubbers-v2",
                                      project.organization,
                                      actor=None)

    if with_datascrubbing:
        with metrics.timer("tasks.store.datascrubbers.data_bak"):
            data_bak = copy.deepcopy(data.data)

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    has_changed = False

    # Fetch the reprocessing revision
    reprocessing_rev = reprocessing.get_reprocessing_revision(project_id)

    try:
        # Event enhancers.  These run before anything else.
        for plugin in plugins.all(version=2):
            enhancers = safe_execute(plugin.get_event_enhancers, data=data)
            for enhancer in enhancers or ():
                enhanced = safe_execute(
                    enhancer, data, _passthrough_errors=(RetrySymbolication, ))
                if enhanced:
                    data = enhanced
                    has_changed = True

        # Stacktrace based event processors.
        new_data = process_stacktraces(data)
        if new_data is not None:
            has_changed = True
            data = new_data
    except RetrySymbolication as e:
        if start_time and (
                time() -
                start_time) > settings.SYMBOLICATOR_PROCESS_EVENT_WARN_TIMEOUT:
            error_logger.warning("process.slow",
                                 extra={
                                     "project_id": project_id,
                                     "event_id": event_id
                                 })

        if start_time and (
                time() -
                start_time) > settings.SYMBOLICATOR_PROCESS_EVENT_HARD_TIMEOUT:
            # Do not drop event but actually continue with rest of pipeline
            # (persisting unsymbolicated event)
            error_logger.exception(
                "process.failed.infinite_retry",
                extra={
                    "project_id": project_id,
                    "event_id": event_id
                },
            )
        else:
            retry_process_event.apply_async(
                args=(),
                kwargs={
                    "process_task_name": process_task.__name__,
                    "task_kwargs": {
                        "cache_key": cache_key,
                        "event_id": event_id,
                        "start_time": start_time,
                    },
                },
                countdown=e.retry_after,
            )
            return

    # Second round of datascrubbing after stacktrace and language-specific
    # processing. First round happened as part of ingest.
    #
    # We assume that all potential PII is produced as part of stacktrace
    # processors and event enhancers.
    #
    # We assume that plugins for e.g. sessionstack (running via
    # `plugin.get_event_preprocessors`) are not producing data that should be
    # PII-stripped, ever.
    #
    # XXX(markus): Javascript event error translation is happening after this block
    # because it uses `get_event_preprocessors` instead of
    # `get_event_enhancers`, possibly move?
    if has_changed and with_datascrubbing:
        with metrics.timer("tasks.store.datascrubbers.scrub"):
            project_config = get_project_config(project)

            new_data = safe_execute(
                scrub_data,
                project_config=project_config,
                event=data.data,
                in_processing=True,
                old_event=data_bak,
            )

            if new_data is not None:
                data.data = new_data

    # TODO(dcramer): ideally we would know if data changed by default
    # Default event processors.
    for plugin in plugins.all(version=2):
        processors = safe_execute(plugin.get_event_preprocessors,
                                  data=data,
                                  _with_transaction=False)
        for processor in processors or ():
            result = safe_execute(processor, data)
            if result:
                data = result
                has_changed = True

    assert data["project"] == project_id, "Project cannot be mutated by plugins"

    # We cannot persist canonical types in the cache, so we need to
    # downgrade this.
    if isinstance(data, CANONICAL_TYPES):
        data = dict(data.items())

    if has_changed:
        # Run some of normalization again such that we don't:
        # - persist e.g. incredibly large stacktraces from minidumps
        # - store event timestamps that are older than our retention window
        #   (also happening with minidumps)
        normalizer = StoreNormalizer(remove_other=False,
                                     is_renormalize=True,
                                     **DEFAULT_STORE_NORMALIZER_ARGS)
        data = normalizer.normalize_event(dict(data))

        issues = data.get("processing_issues")

        try:
            if issues and create_failed_event(
                    cache_key,
                    data,
                    project_id,
                    list(issues.values()),
                    event_id=event_id,
                    start_time=start_time,
                    reprocessing_rev=reprocessing_rev,
            ):
                return
        except RetryProcessing:
            # If `create_failed_event` indicates that we need to retry we
            # invoke ourselves again.  This happens when the reprocessing
            # revision changed while we were processing.
            from_reprocessing = process_task is process_event_from_reprocessing
            submit_process(project, from_reprocessing, cache_key, event_id,
                           start_time, data)
            process_task.delay(cache_key,
                               start_time=start_time,
                               event_id=event_id)
            return

        default_cache.set(cache_key, data, 3600)

    submit_save_event(project, cache_key, event_id, start_time, data)
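
The `RetrySymbolication` handling above implements a soft-warn / hard-give-up retry policy: warn past the soft deadline, stop retrying (and continue with unsymbolicated data) past the hard deadline, otherwise reschedule. A generic, self-contained sketch of the same pattern (names and thresholds invented):

import time
import logging

logger = logging.getLogger(__name__)

SOFT_TIMEOUT = 60    # seconds: warn, but keep retrying (invented value)
HARD_TIMEOUT = 600   # seconds: stop retrying, continue with partial data

class Retryable(Exception):
    def __init__(self, retry_after):
        self.retry_after = retry_after

def process_with_deadlines(start_time, work, reschedule):
    try:
        return work()
    except Retryable as exc:
        elapsed = time.time() - start_time
        if elapsed > SOFT_TIMEOUT:
            logger.warning("processing is slow (%.0fs elapsed)", elapsed)
        if elapsed > HARD_TIMEOUT:
            logger.error("giving up on retries; continuing unprocessed")
            return None  # proceed with whatever data we already have
        reschedule(countdown=exc.retry_after)
        return None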
Code Example #39
def should_comment_sync(installation, external_issue):
    organization = Organization.objects.get(id=external_issue.organization_id)
    has_issue_sync = features.has("organizations:integrations-issue-sync",
                                  organization)
    return has_issue_sync and installation.should_sync("comment")
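
A hedged caller sketch for the helper above; `post_comment_to_tracker` is an invented stand-in for the provider-specific API call:

def post_comment_to_tracker(issue_key, comment_text):
    # stand-in for the integration-specific API call (invented)
    print("syncing comment to %s: %s" % (issue_key, comment_text))

def maybe_sync_comment(installation, external_issue, comment_text):
    # only sync when both the feature flag and the installation-level
    # "comment" sync setting allow it
    if should_comment_sync(installation, external_issue):
        post_comment_to_tracker(external_issue.key, comment_text)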
Code Example #40
    def post(self, request, organization):
        """
        Create a new asynchronous file export task, and
        email the user upon completion.
        """
        # Ensure new data-export features are enabled
        if not features.has("organizations:data-export", organization):
            return Response(status=404)

        # Get environment_id and limit if available
        try:
            environment_id = self._get_environment_id_from_request(
                request, organization.id)
        except Environment.DoesNotExist as error:
            return Response(error, status=400)
        limit = request.data.get("limit")

        # Validate the data export payload
        serializer = DataExportQuerySerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)
        data = serializer.validated_data

        # Validate the project field, if provided
        # A PermissionDenied error will be raised in `_get_projects_by_id` if the request is invalid
        project_query = data["query_info"].get("project")
        if project_query:
            # Coerce the query into a set
            if isinstance(project_query, list):
                projects = self._get_projects_by_id(
                    set(map(int, project_query)), request, organization)
            else:
                projects = self._get_projects_by_id({int(project_query)},
                                                    request, organization)
            data["query_info"]["project"] = [
                project.id for project in projects
            ]

        # Discover Pre-processing
        if data["query_type"] == ExportQueryType.DISCOVER_STR:
            if not features.has("organizations:discover-basic",
                                organization,
                                actor=request.user):
                return Response(status=403)

            if len(data["query_info"].get("field", [])) > MAX_FIELDS:
                detail = "You can export up to {0} fields at a time. Please delete some and try again.".format(
                    MAX_FIELDS)
                raise ParseError(detail=detail)

            if "project" not in data["query_info"]:
                projects = self.get_projects(request, organization)
                data["query_info"]["project"] = [
                    project.id for project in projects
                ]

        try:
            # If this user has sent a request with the same payload and organization,
            # we return the latest one that is NOT complete (i.e. don't start another)
            query_type = ExportQueryType.from_str(data["query_type"])
            data_export, created = ExportedData.objects.get_or_create(
                organization=organization,
                user=request.user,
                query_type=query_type,
                query_info=data["query_info"],
                date_finished=None,
            )
            status = 200
            if created:
                metrics.incr("dataexport.enqueue",
                             tags={"query_type": data["query_type"]},
                             sample_rate=1.0)
                assemble_download.delay(data_export_id=data_export.id,
                                        export_limit=limit,
                                        environment_id=environment_id)
                status = 201
        except ValidationError as e:
            # This will handle invalid JSON requests
            metrics.incr("dataexport.invalid",
                         tags={"query_type": data.get("query_type")},
                         sample_rate=1.0)
            return Response({"detail": six.text_type(e)}, status=400)
        return Response(serialize(data_export, request.user), status=status)
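
Because of the `get_or_create` on (organization, user, query_type, query_info, date_finished=None), re-POSTing an identical payload reuses the in-flight export instead of enqueueing a new one. A client-side sketch of the resulting 201-vs-200 contract (URL, token, and payload shape are assumptions):

import requests

payload = {"query_type": "Discover", "query_info": {"query": "", "field": ["title"]}}
resp = requests.post(
    "https://sentry.example.com/api/0/organizations/my-org/data-export/",  # assumed path
    json=payload,
    headers={"Authorization": "Bearer <token>"},
)
if resp.status_code == 201:
    print("export enqueued")
elif resp.status_code == 200:
    print("identical export already running; reusing it")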
Code Example #41
File: group.py Project: zl0422/sentry
    def get_attrs(self, item_list, user):
        from sentry.plugins.base import plugins
        from sentry.integrations import IntegrationFeatures
        from sentry.models import PlatformExternalIssue

        GroupMeta.objects.populate_cache(item_list)

        attach_foreignkey(item_list, Group.project)

        if user.is_authenticated() and item_list:
            bookmarks = set(
                GroupBookmark.objects.filter(
                    user=user, group__in=item_list).values_list("group_id",
                                                                flat=True))
            seen_groups = dict(
                GroupSeen.objects.filter(user=user,
                                         group__in=item_list).values_list(
                                             "group_id", "last_seen"))
            subscriptions = self._get_subscriptions(item_list, user)
        else:
            bookmarks = set()
            seen_groups = {}
            subscriptions = defaultdict(lambda: (False, None))

        assignees = {
            a.group_id: a.assigned_actor()
            for a in GroupAssignee.objects.filter(group__in=item_list)
        }
        resolved_assignees = Actor.resolve_dict(assignees)

        ignore_items = {
            g.group_id: g
            for g in GroupSnooze.objects.filter(group__in=item_list)
        }

        resolved_item_list = [
            i for i in item_list if i.status == GroupStatus.RESOLVED
        ]
        if resolved_item_list:
            release_resolutions = {
                i[0]: i[1:]
                for i in GroupResolution.objects.filter(
                    group__in=resolved_item_list).values_list(
                        "group", "type", "release__version", "actor_id")
            }

            # due to our laziness, and django's inability to do a reasonable join here
            # we end up with two queries
            commit_results = list(
                Commit.objects.extra(
                    select={"group_id": "sentry_grouplink.group_id"},
                    tables=["sentry_grouplink"],
                    where=[
                        "sentry_grouplink.linked_id = sentry_commit.id",
                        "sentry_grouplink.group_id IN ({})".format(", ".join(
                            six.text_type(i.id) for i in resolved_item_list)),
                        "sentry_grouplink.linked_type = %s",
                        "sentry_grouplink.relationship = %s",
                    ],
                    params=[
                        int(GroupLink.LinkedType.commit),
                        int(GroupLink.Relationship.resolves)
                    ],
                ))
            commit_resolutions = {
                i.group_id: d
                for i, d in zip(commit_results, serialize(
                    commit_results, user))
            }
        else:
            release_resolutions = {}
            commit_resolutions = {}

        actor_ids = set(r[-1] for r in six.itervalues(release_resolutions))
        actor_ids.update(r.actor_id for r in six.itervalues(ignore_items))
        if actor_ids:
            users = list(User.objects.filter(id__in=actor_ids, is_active=True))
            actors = {u.id: d for u, d in zip(users, serialize(users, user))}
        else:
            actors = {}

        share_ids = dict(
            GroupShare.objects.filter(group__in=item_list).values_list(
                "group_id", "uuid"))

        result = {}

        seen_stats = self._get_seen_stats(item_list, user)

        annotations_by_group_id = defaultdict(list)

        organization_id_list = list(
            set(item.project.organization_id for item in item_list))
        # if no groups, then we can't proceed but this seems to be a valid use case
        if not item_list:
            return {}
        if len(organization_id_list) > 1:
            # this should never happen but if it does we should know about it
            logger.warning(
                u"Found multiple organizations for groups: %s, with orgs: %s" %
                ([item.id for item in item_list], organization_id_list))

        # should only have 1 org at this point
        organization_id = organization_id_list[0]
        organization = Organization.objects.get_from_cache(id=organization_id)

        has_unhandled_flag = features.has("organizations:unhandled-issue-flag",
                                          organization,
                                          actor=user)

        # find all the integration installs that have issue tracking
        for integration in Integration.objects.filter(
                organizations=organization_id):
            if not (integration.has_feature(IntegrationFeatures.ISSUE_BASIC) or
                    integration.has_feature(IntegrationFeatures.ISSUE_SYNC)):
                continue

            install = integration.get_installation(organization_id)
            local_annotations_by_group_id = (safe_execute(
                install.get_annotations_for_group_list,
                group_list=item_list,
                _with_transaction=False,
            ) or {})
            merge_list_dictionaries(annotations_by_group_id,
                                    local_annotations_by_group_id)

        # find the external issues for sentry apps and add them in
        local_annotations_by_group_id = (safe_execute(
            PlatformExternalIssue.get_annotations_for_group_list,
            group_list=item_list,
            _with_transaction=False,
        ) or {})
        merge_list_dictionaries(annotations_by_group_id,
                                local_annotations_by_group_id)

        snuba_stats = {}
        if has_unhandled_flag:
            snuba_stats = self._get_group_snuba_stats(item_list, seen_stats)

        for item in item_list:
            active_date = item.active_at or item.first_seen

            annotations = []
            annotations.extend(annotations_by_group_id[item.id])

            # add the annotations for plugins
            # note that the model GroupMeta, where all the information is stored,
            # is already cached at the top of this function, so these for loops
            # don't make a bunch of queries
            for plugin in plugins.for_project(project=item.project, version=1):
                safe_execute(plugin.tags,
                             None,
                             item,
                             annotations,
                             _with_transaction=False)
            for plugin in plugins.for_project(project=item.project, version=2):
                annotations.extend(
                    safe_execute(plugin.get_annotations,
                                 group=item,
                                 _with_transaction=False) or ())

            resolution_actor = None
            resolution_type = None
            resolution = release_resolutions.get(item.id)
            if resolution:
                resolution_type = "release"
                resolution_actor = actors.get(resolution[-1])
            if not resolution:
                resolution = commit_resolutions.get(item.id)
                if resolution:
                    resolution_type = "commit"

            ignore_item = ignore_items.get(item.id)
            if ignore_item:
                ignore_actor = actors.get(ignore_item.actor_id)
            else:
                ignore_actor = None

            result[item] = {
                "assigned_to": resolved_assignees.get(item.id),
                "is_bookmarked": item.id in bookmarks,
                "subscription": subscriptions[item.id],
                "has_seen":
                seen_groups.get(item.id, active_date) > active_date,
                "annotations": annotations,
                "ignore_until": ignore_item,
                "ignore_actor": ignore_actor,
                "resolution": resolution,
                "resolution_type": resolution_type,
                "resolution_actor": resolution_actor,
                "share_id": share_ids.get(item.id),
            }

            if has_unhandled_flag:
                result[item]["is_unhandled"] = bool(
                    snuba_stats.get(item.id, {}).get("unhandled"))

            if seen_stats:
                result[item].update(seen_stats.get(item, {}))
        return result
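
`merge_list_dictionaries` above folds each integration's annotations into one mapping keyed by group id. A plausible implementation, assuming the destination is the `defaultdict(list)` created earlier (this is an inference, not the verbatim helper):

def merge_list_dictionaries(dict_of_lists, other_dict_of_lists):
    # extend the list under each key; dict_of_lists is a defaultdict(list),
    # so missing keys are created on access
    for key, values in other_dict_of_lists.items():
        dict_of_lists[key].extend(values)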
Code Example #42
 def has_permission(self, request):
     return features.has('organizations:create', actor=request.user)
Code Example #43
File: project_group_index.py Project: yangou/sentry
    def put(self, request, project):
        """
        Bulk Mutate a List of Issues
        ````````````````````````````

        Bulk mutate various attributes on issues.  The list of issues
        to modify is given through the `id` query parameter.  It is repeated
        for each issue that should be modified.

        - For non-status updates, the `id` query parameter is required.
        - For status updates, the `id` query parameter may be omitted
          for a batch "update all" query.
        - An optional `status` query parameter may be used to restrict
          mutations to only events with the given status.

        The following attributes can be modified and are supplied as
        JSON object in the body:

        If any ids are out of scope this operation will succeed without
        any data mutation.

        :qparam int id: a list of IDs of the issues to be mutated.  This
                        parameter shall be repeated for each issue.  It
                        is optional only if a status is mutated in which
                        case an implicit `update all` is assumed.
        :qparam string status: optionally limits the query to issues of the
                               specified status.  Valid values are
                               ``"resolved"``, ``"unresolved"`` and
                               ``"ignored"``.
        :pparam string organization_slug: the slug of the organization the
                                          issues belong to.
        :pparam string project_slug: the slug of the project the issues
                                     belong to.
        :param string status: the new status for the issues.  Valid values
                              are ``"resolved"``, ``"resolvedInNextRelease"``,
                              ``"unresolved"``, and ``"ignored"``.
        :param map statusDetails: additional details about the resolution.
                                  Valid values are ``"inRelease"``, ``"inNextRelease"``,
                                  ``"inCommit"``,  ``"ignoreDuration"``, ``"ignoreCount"``,
                                  ``"ignoreWindow"``, ``"ignoreUserCount"``, and
                                  ``"ignoreUserWindow"``.
        :param int ignoreDuration: the number of minutes to ignore this issue.
        :param boolean isPublic: sets the issue to public or private.
        :param boolean merge: allows to merge or unmerge different issues.
        :param string assignedTo: the actor id (or username) of the user or team that should be
                                  assigned to this issue.
        :param boolean hasSeen: in case this API call is invoked with a user
                                context this allows changing of the flag
                                that indicates if the user has seen the
                                event.
        :param boolean isBookmarked: in case this API call is invoked with a
                                     user context this allows changing of
                                     the bookmark flag.
        :auth: required
        """
        group_ids = request.GET.getlist('id')
        if group_ids:
            group_list = Group.objects.filter(
                project=project, id__in=group_ids)
            # filter down group ids to only valid matches
            group_ids = [g.id for g in group_list]
            if not group_ids:
                return Response(status=204)
        else:
            group_list = None

        serializer = GroupValidator(
            data=request.DATA,
            partial=True,
            context={'project': project},
        )
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)
        result = dict(serializer.object)

        acting_user = request.user if request.user.is_authenticated() else None

        if not group_ids:
            try:
                # bulk mutations are limited to 1000 items
                # TODO(dcramer): it'd be nice to support more than this, but it's
                # a bit too complicated right now
                cursor_result, _ = self._search(request, project, {
                    'limit': 1000,
                    'paginator_options': {'max_limit': 1000},
                })
            except ValidationError as exc:
                return Response({'detail': six.text_type(exc)}, status=400)

            group_list = list(cursor_result)
            group_ids = [g.id for g in group_list]

        is_bulk = len(group_ids) > 1

        queryset = Group.objects.filter(
            id__in=group_ids,
        )

        discard = result.get('discard')
        if discard:

            if not features.has('projects:discard-groups', project, actor=request.user):
                return Response({'detail': ['You do not have that feature enabled']}, status=400)

            group_list = list(queryset)
            groups_to_delete = []

            for group in group_list:
                with transaction.atomic():
                    try:
                        tombstone = GroupTombstone.objects.create(
                            previous_group_id=group.id,
                            actor_id=acting_user.id if acting_user else None,
                            **{name: getattr(group, name) for name in TOMBSTONE_FIELDS_FROM_GROUP}
                        )
                    except IntegrityError:
                        # in this case, a tombstone has already been created
                        # for a group, so no hash updates are necessary
                        pass
                    else:
                        groups_to_delete.append(group)

                        GroupHash.objects.filter(
                            group=group,
                        ).update(
                            group=None,
                            group_tombstone_id=tombstone.id,
                        )

            self._delete_groups(request, project, groups_to_delete, delete_type='discard')

            return Response(status=204)

        statusDetails = result.pop('statusDetails', result)
        status = result.get('status')
        release = None
        commit = None

        if status in ('resolved', 'resolvedInNextRelease'):
            if status == 'resolvedInNextRelease' or statusDetails.get('inNextRelease'):
                # XXX(dcramer): this code is copied between the inNextRelease validator
                # due to the status vs statusDetails field
                release = statusDetails.get('inNextRelease') or Release.objects.filter(
                    projects=project,
                    organization_id=project.organization_id,
                ).extra(select={
                    'sort': 'COALESCE(date_released, date_added)',
                }).order_by('-sort')[0]
                activity_type = Activity.SET_RESOLVED_IN_RELEASE
                activity_data = {
                    # no version yet
                    'version': '',
                }
                status_details = {
                    'inNextRelease': True,
                    'actor': serialize(extract_lazy_object(request.user), request.user),
                }
                res_type = GroupResolution.Type.in_next_release
                res_type_str = 'in_next_release'
                res_status = GroupResolution.Status.pending
            elif statusDetails.get('inRelease'):
                release = statusDetails['inRelease']
                activity_type = Activity.SET_RESOLVED_IN_RELEASE
                activity_data = {
                    # no version yet
                    'version': release.version,
                }
                status_details = {
                    'inRelease': release.version,
                    'actor': serialize(extract_lazy_object(request.user), request.user),
                }
                res_type = GroupResolution.Type.in_release
                res_type_str = 'in_release'
                res_status = GroupResolution.Status.resolved
            elif statusDetails.get('inCommit'):
                commit = statusDetails['inCommit']
                activity_type = Activity.SET_RESOLVED_IN_COMMIT
                activity_data = {
                    'commit': commit.id,
                }
                status_details = {
                    'inCommit': serialize(commit, request.user),
                    'actor': serialize(extract_lazy_object(request.user), request.user),
                }
                res_type_str = 'in_commit'
            else:
                res_type_str = 'now'
                activity_type = Activity.SET_RESOLVED
                activity_data = {}
                status_details = {}

            now = timezone.now()
            metrics.incr('group.resolved', instance=res_type_str, skip_internal=True)

            # if we've specified a commit, let's see if it's already been released
            # this will allow us to associate the resolution to a release as if we
            # were simply using 'inRelease' above
            # Note: this is different than the way commit resolution works on deploy
            # creation, as a given deploy is connected to an explicit release, and
            # in this case we're simply choosing the most recent release which contains
            # the commit.
            if commit and not release:
                try:
                    release = Release.objects.filter(
                        projects=project,
                        releasecommit__commit=commit,
                    ).extra(select={
                        'sort': 'COALESCE(date_released, date_added)',
                    }).order_by('-sort')[0]
                    res_type = GroupResolution.Type.in_release
                    res_status = GroupResolution.Status.resolved
                except IndexError:
                    release = None

            for group in group_list:
                with transaction.atomic():
                    resolution = None
                    if release:
                        resolution_params = {
                            'release': release,
                            'type': res_type,
                            'status': res_status,
                            'actor_id': request.user.id
                            if request.user.is_authenticated() else None,
                        }
                        resolution, created = GroupResolution.objects.get_or_create(
                            group=group,
                            defaults=resolution_params,
                        )
                        if not created:
                            resolution.update(
                                datetime=timezone.now(), **resolution_params)

                    if commit:
                        GroupLink.objects.create(
                            group_id=group.id,
                            project_id=group.project_id,
                            linked_type=GroupLink.LinkedType.commit,
                            relationship=GroupLink.Relationship.resolves,
                            linked_id=commit.id,
                        )

                    affected = Group.objects.filter(
                        id=group.id,
                    ).update(
                        status=GroupStatus.RESOLVED,
                        resolved_at=now,
                    )
                    if not resolution:
                        created = affected

                    group.status = GroupStatus.RESOLVED
                    group.resolved_at = now

                    self._subscribe_and_assign_issue(
                        acting_user, group, result)

                    if created:
                        activity = Activity.objects.create(
                            project=group.project,
                            group=group,
                            type=activity_type,
                            user=acting_user,
                            ident=resolution.id if resolution else None,
                            data=activity_data,
                        )
                        # TODO(dcramer): we need a solution for activity rollups
                        # before sending notifications on bulk changes
                        if not is_bulk:
                            activity.send_notification()

                if release:
                    issue_resolved_in_release.send_robust(
                        group=group,
                        project=project,
                        user=acting_user,
                        resolution_type=res_type_str,
                        sender=type(self),
                    )
                elif commit:
                    resolved_with_commit.send_robust(
                        organization_id=group.project.organization_id,
                        user=request.user,
                        group=group,
                        sender=type(self),
                    )

                kick_off_status_syncs.apply_async(kwargs={
                    'project_id': group.project_id,
                    'group_id': group.id,
                })

            result.update({
                'status': 'resolved',
                'statusDetails': status_details,
            })

        elif status:
            new_status = STATUS_CHOICES[result['status']]

            with transaction.atomic():
                happened = queryset.exclude(
                    status=new_status,
                ).update(
                    status=new_status,
                )

                GroupResolution.objects.filter(
                    group__in=group_ids,
                ).delete()

                if new_status == GroupStatus.IGNORED:
                    metrics.incr('group.ignored', skip_internal=True)

                    ignore_duration = (
                        statusDetails.pop('ignoreDuration', None) or
                        statusDetails.pop('snoozeDuration', None)
                    ) or None
                    ignore_count = statusDetails.pop(
                        'ignoreCount', None) or None
                    ignore_window = statusDetails.pop(
                        'ignoreWindow', None) or None
                    ignore_user_count = statusDetails.pop(
                        'ignoreUserCount', None) or None
                    ignore_user_window = statusDetails.pop(
                        'ignoreUserWindow', None) or None
                    if ignore_duration or ignore_count or ignore_user_count:
                        if ignore_duration:
                            ignore_until = timezone.now() + timedelta(
                                minutes=ignore_duration,
                            )
                        else:
                            ignore_until = None
                        for group in group_list:
                            state = {}
                            if ignore_count and not ignore_window:
                                state['times_seen'] = group.times_seen
                            if ignore_user_count and not ignore_user_window:
                                state['users_seen'] = group.count_users_seen()
                            GroupSnooze.objects.create_or_update(
                                group=group,
                                values={
                                    'until': ignore_until,
                                    'count': ignore_count,
                                    'window': ignore_window,
                                    'user_count': ignore_user_count,
                                    'user_window': ignore_user_window,
                                    'state': state,
                                    'actor_id': request.user.id if request.user.is_authenticated() else None,
                                },
                            )
                            result['statusDetails'] = {
                                'ignoreCount': ignore_count,
                                'ignoreUntil': ignore_until,
                                'ignoreUserCount': ignore_user_count,
                                'ignoreUserWindow': ignore_user_window,
                                'ignoreWindow': ignore_window,
                                'actor': serialize(extract_lazy_object(request.user), request.user),
                            }
                    else:
                        GroupSnooze.objects.filter(
                            group__in=group_ids,
                        ).delete()
                        ignore_until = None
                        result['statusDetails'] = {}
                else:
                    result['statusDetails'] = {}

            if group_list and happened:
                if new_status == GroupStatus.UNRESOLVED:
                    activity_type = Activity.SET_UNRESOLVED
                    activity_data = {}
                elif new_status == GroupStatus.IGNORED:
                    activity_type = Activity.SET_IGNORED
                    activity_data = {
                        'ignoreCount': ignore_count,
                        'ignoreDuration': ignore_duration,
                        'ignoreUntil': ignore_until,
                        'ignoreUserCount': ignore_user_count,
                        'ignoreUserWindow': ignore_user_window,
                        'ignoreWindow': ignore_window,
                    }

                issue_ignored.send_robust(
                    project=project,
                    user=acting_user,
                    group_list=group_list,
                    activity_data=activity_data,
                    sender=self.__class__)

                for group in group_list:
                    group.status = new_status

                    activity = Activity.objects.create(
                        project=group.project,
                        group=group,
                        type=activity_type,
                        user=acting_user,
                        data=activity_data,
                    )
                    # TODO(dcramer): we need a solution for activity rollups
                    # before sending notifications on bulk changes
                    if not is_bulk:
                        if acting_user:
                            GroupSubscription.objects.subscribe(
                                user=acting_user,
                                group=group,
                                reason=GroupSubscriptionReason.status_change,
                            )
                        activity.send_notification()

                    if new_status == GroupStatus.UNRESOLVED:
                        kick_off_status_syncs.apply_async(kwargs={
                            'project_id': group.project_id,
                            'group_id': group.id,
                        })

        if 'assignedTo' in result:
            assigned_actor = result['assignedTo']
            if assigned_actor:
                for group in group_list:
                    resolved_actor = assigned_actor.resolve()

                    GroupAssignee.objects.assign(group, resolved_actor, acting_user)
                result['assignedTo'] = serialize(
                    assigned_actor.resolve(), acting_user, ActorSerializer())
            else:
                for group in group_list:
                    GroupAssignee.objects.deassign(group, acting_user)

        if result.get('hasSeen') and project.member_set.filter(user=acting_user).exists():
            for group in group_list:
                instance, created = create_or_update(
                    GroupSeen,
                    group=group,
                    user=acting_user,
                    project=group.project,
                    values={
                        'last_seen': timezone.now(),
                    }
                )
        elif result.get('hasSeen') is False:
            GroupSeen.objects.filter(
                group__in=group_ids,
                user=acting_user,
            ).delete()

        if result.get('isBookmarked'):
            for group in group_list:
                GroupBookmark.objects.get_or_create(
                    project=project,
                    group=group,
                    user=acting_user,
                )
                GroupSubscription.objects.subscribe(
                    user=acting_user,
                    group=group,
                    reason=GroupSubscriptionReason.bookmark,
                )
        elif result.get('isBookmarked') is False:
            GroupBookmark.objects.filter(
                group__in=group_ids,
                user=acting_user,
            ).delete()

        # TODO(dcramer): we could make these more efficient by first
        # querying for which rows are present (if N > 2), flipping the flag
        # on those rows, and then creating the missing rows
        if result.get('isSubscribed') in (True, False):
            is_subscribed = result['isSubscribed']
            for group in group_list:
                # NOTE: Subscribing without an initiating event (assignment,
                # commenting, etc.) clears out the previous subscription reason
                # to avoid showing confusing messaging as a result of this
                # action. It'd be jarring to go directly from "you are not
                # subscribed" to "you were subscribed due since you were
                # assigned" just by clicking the "subscribe" button (and you
                # may no longer be assigned to the issue anyway.)
                GroupSubscription.objects.create_or_update(
                    user=acting_user,
                    group=group,
                    project=project,
                    values={
                        'is_active': is_subscribed,
                        'reason': GroupSubscriptionReason.unknown,
                    },
                )

            result['subscriptionDetails'] = {
                'reason': SUBSCRIPTION_REASON_MAP.get(
                    GroupSubscriptionReason.unknown,
                    'unknown',
                ),
            }

        if 'isPublic' in result:
            # We always want to delete an existing share, because setting
            # isPublic=True, even when the group is already public, should
            # regenerate the share id.
            for group in group_list:
                # .delete() returns (num_deleted, per_model_counts); unpack so
                # the check reflects whether a share row was actually removed
                deleted, _ = GroupShare.objects.filter(group=group).delete()
                if deleted:
                    result['shareId'] = None
                    Activity.objects.create(
                        project=group.project,
                        group=group,
                        type=Activity.SET_PRIVATE,
                        user=acting_user,
                    )

        if result.get('isPublic'):
            for group in group_list:
                share, created = GroupShare.objects.get_or_create(
                    project=group.project,
                    group=group,
                    user=acting_user,
                )
                if created:
                    result['shareId'] = share.uuid
                    Activity.objects.create(
                        project=group.project,
                        group=group,
                        type=Activity.SET_PUBLIC,
                        user=acting_user,
                    )

        # XXX(dcramer): this feels a bit shady, like it should be its own
        # endpoint
        if result.get('merge') and len(group_list) > 1:
            group_list_by_times_seen = sorted(
                group_list,
                key=lambda g: (g.times_seen, g.id),
                reverse=True,
            )
            primary_group, groups_to_merge = group_list_by_times_seen[0], group_list_by_times_seen[1:]

            group_ids_to_merge = [g.id for g in groups_to_merge]
            eventstream_state = eventstream.start_merge(
                primary_group.project_id,
                group_ids_to_merge,
                primary_group.id
            )

            Group.objects.filter(
                id__in=group_ids_to_merge
            ).update(
                status=GroupStatus.PENDING_MERGE
            )

            transaction_id = uuid4().hex
            merge_groups.delay(
                from_object_ids=group_ids_to_merge,
                to_object_id=primary_group.id,
                transaction_id=transaction_id,
                eventstream_state=eventstream_state,
            )

            Activity.objects.create(
                project=primary_group.project,
                group=primary_group,
                type=Activity.MERGE,
                user=acting_user,
                data={
                    'issues': [{
                        'id': c.id
                    } for c in groups_to_merge],
                },
            )

            result['merge'] = {
                'parent': six.text_type(primary_group.id),
                'children': [six.text_type(g.id) for g in groups_to_merge],
            }

        return Response(result)
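
In the merge branch above, the parent group is simply the most-seen one, with ids breaking ties. The selection in isolation (a minimal sketch of the ordering already used above):

def split_primary(group_list):
    # sort by (times_seen, id) descending: the busiest group wins, and the
    # higher (newer) id breaks ties
    ordered = sorted(group_list, key=lambda g: (g.times_seen, g.id), reverse=True)
    return ordered[0], ordered[1:]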
Code Example #44
 def has_feature(self, request, organization):
     return features.has("organizations:performance-view",
                         organization,
                         actor=request.user)
Code Example #45
def sync_group_assignee_inbound(integration,
                                email,
                                external_issue_key,
                                assign=True):
    """
    Given an integration, user email address and an external issue key,
    assign linked groups to matching users. Checks project membership.
    Returns a list of groups that were successfully assigned.
    """
    from sentry import features
    from sentry.models import Group, UserEmail, User

    logger = logging.getLogger("sentry.integrations.%s" % integration.provider)

    orgs_with_sync_enabled = []
    for org in integration.organizations.all():
        has_issue_sync = features.has("organizations:integrations-issue-sync",
                                      org)
        if not has_issue_sync:
            continue

        installation = integration.get_installation(org.id)
        if installation.should_sync("inbound_assignee"):
            orgs_with_sync_enabled.append(org.id)

    affected_groups = list(
        Group.objects.get_groups_by_external_issue(
            integration, external_issue_key).filter(
                project__organization_id__in=orgs_with_sync_enabled))

    if not affected_groups:
        return []

    if not assign:
        for group in affected_groups:
            GroupAssignee.objects.deassign(group)
        return affected_groups

    users = {
        u.id: u
        for u in User.objects.filter(id__in=UserEmail.objects.filter(
            is_verified=True, email=email).values_list("user_id", flat=True))
    }

    projects_by_user = get_user_project_ids(list(users.values()))

    groups_assigned = []
    for group in affected_groups:
        try:
            user_id = [
                user_id for user_id, projects in projects_by_user.items()
                if group.project_id in projects
            ][0]
        except IndexError:
            logger.info(
                "assignee-not-found-inbound",
                extra={
                    "integration_id": integration.id,
                    "email": email,
                    "issue_key": external_issue_key,
                },
            )
        else:
            user = users[user_id]
            GroupAssignee.objects.assign(group, user)
            groups_assigned.append(group)

    return groups_assigned
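
A hedged sketch of how an integration's inbound webhook might drive the helper above; the payload field names are invented, but the `assign=False` deassignment path matches the function's contract:

def handle_assignee_change(integration, payload):
    # `payload` shape is hypothetical; only the helper's signature is real
    issue_key = payload["issue_key"]
    assignee_email = (payload.get("assignee") or {}).get("email")
    if assignee_email:
        sync_group_assignee_inbound(integration, assignee_email, issue_key, assign=True)
    else:
        # no assignee on the external issue: deassign the linked groups
        sync_group_assignee_inbound(integration, None, issue_key, assign=False)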
Code Example #46
File: files.py Project: webZW/sentry
def get_max_file_size(organization):
    """Returns the maximum allowed debug file size for this organization."""
    if features.has('organizations:large-debug-files', organization):
        return MAX_FILE_SIZE
    else:
        return options.get('system.maximum-file-size')
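
Usage is a simple guard; a sketch with an invented caller:

def check_debug_file_size(organization, size_in_bytes):
    # hypothetical guard built on the helper above
    if size_in_bytes > get_max_file_size(organization):
        raise ValueError("debug file exceeds the allowed maximum size")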
Code Example #47
    def assign(self, group, assigned_to, acting_user=None):
        from sentry import features
        from sentry.models import User, Team, GroupSubscription, GroupSubscriptionReason

        GroupSubscription.objects.subscribe_actor(
            group=group,
            actor=assigned_to,
            reason=GroupSubscriptionReason.assigned)

        if isinstance(assigned_to, User):
            assignee_type = "user"
            other_type = "team"
        elif isinstance(assigned_to, Team):
            assignee_type = "team"
            other_type = "user"
        else:
            raise AssertionError("Invalid type to assign to: %r" %
                                 type(assigned_to))

        now = timezone.now()
        assignee, created = GroupAssignee.objects.get_or_create(
            group=group,
            defaults={
                "project": group.project,
                assignee_type: assigned_to,
                "date_added": now
            },
        )

        if not created:
            affected = (GroupAssignee.objects.filter(group=group).exclude(
                **{
                    assignee_type: assigned_to
                }).update(
                    **{
                        assignee_type: assigned_to,
                        other_type: None,
                        "date_added": now
                    }))
        else:
            affected = True
            issue_assigned.send_robust(project=group.project,
                                       group=group,
                                       user=acting_user,
                                       sender=self.__class__)

        if affected:
            activity = Activity.objects.create(
                project=group.project,
                group=group,
                type=Activity.ASSIGNED,
                user=acting_user,
                data={
                    "assignee": six.text_type(assigned_to.id),
                    "assigneeEmail": getattr(assigned_to, "email", None),
                    "assigneeType": assignee_type,
                },
            )
            activity.send_notification()
            metrics.incr("group.assignee.change",
                         instance="assigned",
                         skip_internal=True)
            # sync Sentry assignee to external issues
            if assignee_type == "user" and features.has(
                    "organizations:integrations-issue-sync",
                    group.organization,
                    actor=acting_user):
                sync_group_assignee_outbound(group,
                                             assigned_to.id,
                                             assign=True)
Code Example #48
    def validate(self, data):
        organization = self.context["organization"]
        query_info = data["query_info"]

        # Validate the project field, if provided
        # A PermissionDenied error will be raised in `get_projects_by_id` if the request is invalid
        project_query = query_info.get("project")
        if project_query:
            get_projects_by_id = self.context["get_projects_by_id"]
            # Coerce the query into a set
            if isinstance(project_query, list):
                projects = get_projects_by_id(set(map(int, project_query)))
            else:
                projects = get_projects_by_id({int(project_query)})
            query_info["project"] = [project.id for project in projects]

        # Discover Pre-processing
        if data["query_type"] == ExportQueryType.DISCOVER_STR:
            # coerce the fields into a list as needed
            base_fields = query_info.get("field", [])
            if not isinstance(base_fields, list):
                base_fields = [base_fields]

            equations, fields = categorize_columns(base_fields)

            if len(base_fields) > MAX_FIELDS:
                detail = f"You can export up to {MAX_FIELDS} fields at a time. Please delete some and try again."
                raise serializers.ValidationError(detail)
            elif len(base_fields) == 0:
                raise serializers.ValidationError("at least one field is required to export")

            if "query" not in query_info:
                detail = "query is a required to export, please pass an empty string if you don't want to set one"
                raise serializers.ValidationError(detail)

            query_info["field"] = fields
            query_info["equations"] = equations

            if not query_info.get("project"):
                projects = self.context["get_projects"]()
                query_info["project"] = [project.id for project in projects]

            # make sure to fix the export start/end times to ensure consistent results
            try:
                start, end = get_date_range_from_params(query_info)
            except InvalidParams as e:
                sentry_sdk.set_tag("query.error_reason", "Invalid date params")
                raise serializers.ValidationError(str(e))

            if "statsPeriod" in query_info:
                del query_info["statsPeriod"]
            if "statsPeriodStart" in query_info:
                del query_info["statsPeriodStart"]
            if "statsPeriodEnd" in query_info:
                del query_info["statsPeriodEnd"]
            query_info["start"] = start.isoformat()
            query_info["end"] = end.isoformat()
            query_info["use_snql"] = features.has("organizations:discover-use-snql", organization)

            # validate the query string by trying to parse it
            processor = DiscoverProcessor(
                discover_query=query_info,
                organization_id=organization.id,
            )
            try:
                builder = QueryBuilder(
                    Dataset.Discover,
                    processor.params,
                    query=query_info["query"],
                    selected_columns=fields.copy(),
                    equations=equations,
                    auto_fields=True,
                    auto_aggregations=True,
                )
                builder.get_snql_query()
            except InvalidSearchQuery as err:
                raise serializers.ValidationError(str(err))

        return data
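
An illustrative input that would pass this validator (key names are taken from the checks above; the concrete values are invented):

from sentry.data_export.base import ExportQueryType  # assumed import path

data = {
    "query_type": ExportQueryType.DISCOVER_STR,
    "query_info": {
        "project": ["42"],              # coerced to [42] via get_projects_by_id
        "field": ["title", "count()"],  # a bare string would be wrapped in a list
        "query": "",                    # required; an empty string is allowed
        "statsPeriod": "24h",           # replaced by explicit start/end below
    },
}
# After validation, query_info gains ISO "start"/"end" timestamps,
# "equations", and "use_snql", and the statsPeriod* keys are removed.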
Code example #49
    def process_update(self, subscription_update):
        dataset = self.subscription.snuba_query.dataset
        try:
            # Check that the project exists
            self.subscription.project
        except Project.DoesNotExist:
            metrics.incr("incidents.alert_rules.ignore_deleted_project")
            return
        if dataset == "events" and not features.has(
                "organizations:incidents",
                self.subscription.project.organization):
            # The organization has downgraded since this subscription was
            # created, so just ignore updates for now.
            metrics.incr(
                "incidents.alert_rules.ignore_update_missing_incidents")
            return
        elif dataset == "transactions" and not features.has(
                "organizations:performance-view",
                self.subscription.project.organization):
            # The organization has downgraded since this subscription was
            # created, so just ignore updates for now.
            metrics.incr(
                "incidents.alert_rules.ignore_update_missing_incidents_performance"
            )
            return

        if not hasattr(self, "alert_rule"):
            # If the alert rule has been removed then just skip
            metrics.incr(
                "incidents.alert_rules.no_alert_rule_for_subscription")
            logger.error(
                "Received an update for a subscription, but no associated alert rule exists"
            )
            # TODO: Delete subscription here.
            return

        if subscription_update["timestamp"] <= self.last_update:
            metrics.incr(
                "incidents.alert_rules.skipping_already_processed_update")
            return

        self.last_update = subscription_update["timestamp"]

        if len(subscription_update["values"]["data"]) > 1:
            logger.warning(
                "Subscription returned more than 1 row of data",
                extra={
                    "subscription_id": self.subscription.id,
                    "dataset": self.subscription.snuba_query.dataset,
                    "snuba_subscription_id": self.subscription.subscription_id,
                    "result": subscription_update,
                },
            )
        aggregation_value = list(subscription_update["values"]["data"][0].values())[0]
        # In some cases Snuba can return a None value for an aggregation. This means
        # there were no rows present when we made the query, for certain types of
        # aggregations like avg. Defaulting this to 0 for now. It might turn out that
        # we'd prefer to skip the update in the future.
        if aggregation_value is None:
            aggregation_value = 0
        alert_operator, resolve_operator = self.THRESHOLD_TYPE_OPERATORS[
            AlertRuleThresholdType(self.alert_rule.threshold_type)]
        fired_incident_triggers = []
        with transaction.atomic():
            for trigger in self.triggers:
                if alert_operator(
                    aggregation_value, trigger.alert_threshold
                ) and not self.check_trigger_status(trigger, TriggerStatus.ACTIVE):
                    metrics.incr("incidents.alert_rules.threshold", tags={"type": "alert"})
                    incident_trigger = self.trigger_alert_threshold(trigger, aggregation_value)
                    if incident_trigger is not None:
                        fired_incident_triggers.append(incident_trigger)
                else:
                    self.trigger_alert_counts[trigger.id] = 0

                if (
                    resolve_operator(aggregation_value, self.calculate_resolve_threshold(trigger))
                    and self.active_incident
                    and self.check_trigger_status(trigger, TriggerStatus.ACTIVE)
                ):
                    metrics.incr("incidents.alert_rules.threshold", tags={"type": "resolve"})
                    incident_trigger = self.trigger_resolve_threshold(trigger, aggregation_value)
                    if incident_trigger is not None:
                        fired_incident_triggers.append(incident_trigger)
                else:
                    self.trigger_resolve_counts[trigger.id] = 0

            if fired_incident_triggers:
                self.handle_trigger_actions(fired_incident_triggers,
                                            aggregation_value)

        # We update the rule stats here after we commit the transaction. This guarantees
        # that we'll never miss an update, since we'll never roll back if the process
        # is killed here. The trade-off is that we might process an update twice. Mostly
        # this will have no effect, but if someone manages to close a triggered incident
        # before the next update is processed, we might alert twice.
        self.update_alert_rule_stats()
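
THRESHOLD_TYPE_OPERATORS is not shown in this snippet; based on how it is unpacked above, a plausible shape is the following (an assumption, not the verbatim definition):

import operator

from sentry.incidents.models import AlertRuleThresholdType  # assumed import path

# Assumed mapping: each threshold type yields an (alert_operator,
# resolve_operator) pair, consumed by process_update() above.
THRESHOLD_TYPE_OPERATORS = {
    AlertRuleThresholdType.ABOVE: (operator.gt, operator.lt),
    AlertRuleThresholdType.BELOW: (operator.lt, operator.gt),
}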
Code example #50
def get_project_config(project, full_config=True, project_keys=None):
    """
    Constructs the ProjectConfig information.

    :param project: The project to load configuration for. Ensure that
        organization is bound on this object; otherwise it will be loaded from
        the database.
    :param full_config: True if the full config is required, False if
        only the restricted config (for external relays) is required
        (default True, i.e. full configuration).
    :param project_keys: Pre-fetched project keys for performance. If no
        project keys are provided, it is assumed that the config does not
        need to contain auth information (this is the case when used in
        Python's StoreView).

    :return: a ProjectConfig object for the given project
    """
    with configure_scope() as scope:
        scope.set_tag("project", project.id)

    if project.status != ObjectStatus.VISIBLE:
        return ProjectConfig(project, disabled=True)

    public_keys = get_public_key_configs(project, full_config, project_keys=project_keys)

    with Hub.current.start_span(op="get_public_config"):
        now = datetime.utcnow().replace(tzinfo=utc)
        cfg = {
            "disabled": False,
            "slug": project.slug,
            "lastFetch": now,
            "lastChange": project.get_option("sentry:relay-rev-lastchange", now),
            "rev": project.get_option("sentry:relay-rev", uuid.uuid4().hex),
            "publicKeys": public_keys,
            "config": {
                "allowedDomains": list(get_origins(project)),
                "trustedRelays": [
                    r["public_key"]
                    for r in project.organization.get_option("sentry:trusted-relays", [])
                    if r
                ],
                "piiConfig": get_pii_config(project),
                "datascrubbingSettings": get_datascrubbing_settings(project),
                "features": get_exposed_features(project),
            },
            "organizationId": project.organization_id,
            "projectId": project.id,  # XXX: Unused by Relay, required by Python store
        }
    allow_dynamic_sampling = features.has(
        "organizations:filters-and-sampling",
        project.organization,
    )
    if allow_dynamic_sampling:
        dynamic_sampling = project.get_option("sentry:dynamic_sampling")
        if dynamic_sampling is not None:
            cfg["config"]["dynamicSampling"] = dynamic_sampling

    if not full_config:
        # This is all we need for external Relay processors
        return ProjectConfig(project, **cfg)

    if features.has("organizations:performance-ops-breakdown",
                    project.organization):
        cfg["config"]["breakdownsV2"] = project.get_option("sentry:breakdowns")
    if features.has("organizations:performance-suspect-spans-ingestion",
                    project.organization):
        cfg["config"]["spanAttributes"] = project.get_option(
            "sentry:span_attributes")
    with Hub.current.start_span(op="get_filter_settings"):
        cfg["config"]["filterSettings"] = get_filter_settings(project)
    with Hub.current.start_span(op="get_grouping_config_dict_for_project"):
        cfg["config"]["groupingConfig"] = get_grouping_config_dict_for_project(
            project)
    with Hub.current.start_span(op="get_event_retention"):
        cfg["config"]["eventRetention"] = quotas.get_event_retention(
            project.organization)
    with Hub.current.start_span(op="get_all_quotas"):
        cfg["config"]["quotas"] = get_quotas(project, keys=project_keys)

    return ProjectConfig(project, **cfg)
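
A usage sketch: the restricted variant is what an external Relay receives, while full_config=True feeds internal processing. The ProjectKey lookup is shown explicitly, and project_id is assumed to be bound elsewhere:

from sentry.models import Project, ProjectKey

project = Project.objects.get_from_cache(id=project_id)
keys = list(ProjectKey.objects.filter(project=project))

restricted_cfg = get_project_config(project, full_config=False, project_keys=keys)
full_cfg = get_project_config(project, full_config=True, project_keys=keys)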
Code example #51
File: group_notes.py Project: sendayang/sentry
    def post(self, request, group):
        serializer = NoteSerializer(data=request.DATA,
                                    context={'group': group})

        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

        data = dict(serializer.object)

        mentions = data.pop('mentions', [])

        if Activity.objects.filter(
                group=group,
                type=Activity.NOTE,
                user=request.user,
                data=data,
                datetime__gte=timezone.now() - timedelta(hours=1),
        ).exists():
            return Response(
                {'detail': 'You have already posted that comment.'},
                status=status.HTTP_400_BAD_REQUEST)

        GroupSubscription.objects.subscribe(
            group=group,
            user=request.user,
            reason=GroupSubscriptionReason.comment,
        )

        actors = Actor.resolve_many(mentions)
        actor_mentions = seperate_resolved_actors(actors)

        for user in actor_mentions.get('users'):
            GroupSubscription.objects.subscribe(
                group=group,
                user=user,
                reason=GroupSubscriptionReason.mentioned,
            )

        mentioned_teams = actor_mentions.get('teams')

        mentioned_team_users = list(
            User.objects.filter(
                sentry_orgmember_set__organization_id=group.project.organization_id,
                sentry_orgmember_set__organizationmemberteam__team__in=mentioned_teams,
                sentry_orgmember_set__organizationmemberteam__is_active=True,
                is_active=True,
            )
            .exclude(id__in={u.id for u in actor_mentions.get('users')})
            .values_list('id', flat=True))

        GroupSubscription.objects.bulk_subscribe(
            group=group,
            user_ids=mentioned_team_users,
            reason=GroupSubscriptionReason.team_mentioned,
        )

        activity = Activity.objects.create(
            group=group,
            project=group.project,
            type=Activity.NOTE,
            user=extract_lazy_object(request.user),
            data=data,
        )

        # sync Sentry comments to external issues
        if features.has('organizations:internal-catchall',
                        group.organization,
                        actor=request.user):
            external_issues = list(
                ExternalIssue.objects.filter(
                    id__in=GroupLink.objects.filter(
                        project_id=group.project_id,
                        group_id=group.id,
                        linked_type=GroupLink.LinkedType.issue,
                    ).values_list('linked_id', flat=True)
                )
            )

            if external_issues:
                integrations = {
                    i.id: i
                    for i in Integration.objects.filter(id__in=[
                        external_issue.integration_id
                        for external_issue in external_issues
                    ])
                }

                for external_issue in external_issues:
                    integration = integrations[external_issue.integration_id]
                    integration.get_installation().create_comment(
                        external_issue.key, data['text'])

        activity.send_notification()
        return Response(serialize(activity, request.user), status=201)
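
An illustrative request body for this endpoint ("text" is what data['text'] reads above; the mention identifier format is an assumption about what Actor.resolve_many accepts):

payload = {
    "text": "Looks like a regression from the last deploy.",
    "mentions": ["user:1", "team:2"],  # hypothetical actor identifiers
}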
Code example #52
def _do_process_event(
    cache_key,
    start_time,
    event_id,
    process_task,
    data=None,
    data_has_changed=None,
    from_symbolicate=False,
):
    from sentry.plugins.base import plugins

    if data is None:
        data = event_processing_store.get(cache_key)

    if data is None:
        metrics.incr(
            "events.failed", tags={"reason": "cache", "stage": "process"}, skip_internal=False
        )
        error_logger.error("process.failed.empty", extra={"cache_key": cache_key})
        return

    data = CanonicalKeyDict(data)

    project_id = data["project"]
    set_current_project(project_id)

    event_id = data["event_id"]

    with sentry_sdk.start_span(op="tasks.store.process_event.get_project_from_cache"):
        project = Project.objects.get_from_cache(id=project_id)

    with metrics.timer("tasks.store.process_event.organization.get_from_cache"):
        project._organization_cache = Organization.objects.get_from_cache(
            id=project.organization_id
        )

    has_changed = bool(data_has_changed)

    with sentry_sdk.start_span(op="tasks.store.process_event.get_reprocessing_revision"):
        # Fetch the reprocessing revision
        reprocessing_rev = reprocessing.get_reprocessing_revision(project_id)

    # Stacktrace based event processors.
    with sentry_sdk.start_span(op="task.store.process_event.stacktraces"):
        with metrics.timer(
            "tasks.store.process_event.stacktraces", tags={"from_symbolicate": from_symbolicate}
        ):
            new_data = process_stacktraces(data)

    if new_data is not None:
        has_changed = True
        data = new_data

    # Second round of datascrubbing after stacktrace and language-specific
    # processing. First round happened as part of ingest.
    #
    # *Right now* the only sensitive data that is added in stacktrace
    # processing are usernames in filepaths, so we run directly after
    # stacktrace processors.
    #
    # We do not yet want to deal with context data produced by plugins like
    # sessionstack or fullstory (which are in `get_event_preprocessors`), as
    # this data is very unlikely to be sensitive data. This is why scrubbing
    # happens somewhere in the middle of the pipeline.
    #
    # On the other hand, Javascript event error translation is happening after
    # this block because it uses `get_event_preprocessors` instead of
    # `get_event_enhancers`.
    #
    # We are fairly confident, however, that this should run *before*
    # re-normalization as it is hard to find sensitive data in partially
    # trimmed strings.
    if has_changed and options.get("processing.can-use-scrubbers"):
        with sentry_sdk.start_span(op="task.store.datascrubbers.scrub"):
            with metrics.timer(
                "tasks.store.datascrubbers.scrub", tags={"from_symbolicate": from_symbolicate}
            ):
                new_data = safe_execute(scrub_data, project=project, event=data.data)

                # XXX(markus): When datascrubbing is finally "totally stable", we might want
                # to drop the event if it crashes to avoid saving PII
                if new_data is not None and features.has(
                    "organizations:datascrubbers-v2", project.organization, actor=None
                ):
                    data.data = new_data

    # TODO(dcramer): ideally we would know if data changed by default
    # Default event processors.
    for plugin in plugins.all(version=2):
        with sentry_sdk.start_span(op="task.store.process_event.preprocessors") as span:
            span.set_data("plugin", plugin.slug)
            span.set_data("from_symbolicate", from_symbolicate)
            with metrics.timer(
                "tasks.store.process_event.preprocessors",
                tags={"plugin": plugin.slug, "from_symbolicate": from_symbolicate},
            ):
                processors = safe_execute(
                    plugin.get_event_preprocessors, data=data, _with_transaction=False
                )
                for processor in processors or ():
                    try:
                        result = processor(data)
                    except Exception:
                        error_logger.exception("tasks.store.preprocessors.error")
                        data.setdefault("_metrics", {})["flag.processing.error"] = True
                        has_changed = True
                    else:
                        if result:
                            data = result
                            has_changed = True

    assert data["project"] == project_id, "Project cannot be mutated by plugins"

    # We cannot persist canonical types in the cache, so we need to
    # downgrade this.
    if isinstance(data, CANONICAL_TYPES):
        data = dict(data.items())

    if has_changed:
        # Run some of normalization again such that we don't:
        # - persist e.g. incredibly large stacktraces from minidumps
        # - store event timestamps that are older than our retention window
        #   (also happening with minidumps)
        normalizer = StoreNormalizer(
            remove_other=False, is_renormalize=True, **DEFAULT_STORE_NORMALIZER_ARGS
        )
        data = normalizer.normalize_event(dict(data))

        issues = data.get("processing_issues")

        try:
            if issues and create_failed_event(
                cache_key,
                data,
                project_id,
                list(issues.values()),
                event_id=event_id,
                start_time=start_time,
                reprocessing_rev=reprocessing_rev,
            ):
                return
        except RetryProcessing:
            # If `create_failed_event` indicates that we need to retry we
            # invoke ourselves again.  This happens when the reprocessing
            # revision changed while we were processing.
            _do_preprocess_event(cache_key, data, start_time, event_id, process_task, project)
            return

        cache_key = event_processing_store.store(data)

    submit_save_event(project, cache_key, event_id, start_time, data)
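
The plugin loop above implies a small contract for event preprocessors: each callable receives the event data and returns mutated data (truthy) or None for "no change". A minimal sketch under that assumption; the plugin below is illustrative, not a real Sentry plugin:

def strip_debug_meta(data):
    # Returning the mutated data marks the event as changed in
    # _do_process_event; returning None leaves has_changed untouched.
    if data.pop("debug_meta", None) is not None:
        return data
    return None

class ExamplePlugin:
    slug = "example"

    def get_event_preprocessors(self, data):
        return [strip_debug_meta]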
Code example #53
    def has_feature(self, organization, request):
        return features.has(
            "organizations:discover", organization, actor=request.user
        ) or features.has(
            "organizations:discover-query", organization, actor=request.user
        )
Code example #54
    def get_max_length(self):
        if features.has("organizations:higher-ownership-limit",
                        self.context["ownership"].project.organization):
            return HIGHER_MAX_RAW_LENGTH
        return MAX_RAW_LENGTH
Code example #55
    def has_chart_interpolation(self, organization: Organization,
                                request: Request) -> bool:
        return features.has("organizations:performance-chart-interpolation",
                            organization, actor=request.user)
Code example #56
    def has_feature(self, organization, request):
        return features.has(
            "organizations:performance-tag-explorer", organization, actor=request.user
        )
Code example #57
def get_sources_for_project(project):
    """
    Returns a list of symbol sources for this project.
    """

    sources = []

    # The symbolicator evaluates sources in the order they are declared. Always
    # try to download symbols from Sentry first.
    project_source = get_internal_source(project)
    sources.append(project_source)

    # Check that the organization still has access to symbol sources. This
    # controls both builtin and external sources.
    organization = project.organization

    if not features.has("organizations:symbol-sources", organization):
        return sources

    # Custom sources have their own feature flag. Check them independently.
    if features.has("organizations:custom-symbol-sources", organization):
        sources_config = project.get_option("sentry:symbol_sources")
    else:
        sources_config = None

    if sources_config:
        try:
            custom_sources = parse_sources(sources_config)
            sources.extend(
                normalize_user_source(source) for source in custom_sources
                if source["type"] != "appStoreConnect")
        except InvalidSourcesError:
            # Source configs should be validated when they are saved. If this
            # did not happen, this indicates a bug. Record this, but do not stop
            # processing at this point.
            logger.error("Invalid symbolicator source config", exc_info=True)

    def resolve_alias(source):
        for key in source.get("sources") or ():
            other_source = settings.SENTRY_BUILTIN_SOURCES.get(key)
            if other_source:
                if other_source.get("type") == "alias":
                    yield from resolve_alias(other_source)
                else:
                    yield other_source

    # Add builtin sources last to ensure that custom sources have precedence
    # over our defaults.
    builtin_sources = project.get_option("sentry:builtin_symbol_sources")
    for key, source in settings.SENTRY_BUILTIN_SOURCES.items():
        if key not in builtin_sources:
            continue

        # special internal alias type expands to more than one item.  This
        # is used to make `apple` expand to `ios`/`macos` and other
        # sources if configured as such.
        if source.get("type") == "alias":
            sources.extend(resolve_alias(source))
        else:
            sources.append(source)

    return sources
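
The comment above notes that an alias such as `apple` expands to `ios`/`macos`; a sketch of what such entries in settings.SENTRY_BUILTIN_SOURCES might look like (the exact shipped content is assumed):

# Assumed shape of alias entries, matching the resolve_alias() walk above:
# an "alias" source lists the keys of other sources, which may themselves
# be aliases. URLs are placeholders.
SENTRY_BUILTIN_SOURCES = {
    "apple": {"type": "alias", "sources": ["ios", "macos"]},
    "ios": {"type": "http", "id": "sentry:ios", "url": "https://example.invalid/ios/"},
    "macos": {"type": "http", "id": "sentry:macos", "url": "https://example.invalid/macos/"},
}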
Code example #58
    def put(self, request, project):
        """
        Update a Project
        ````````````````

        Update various attributes and configurable settings for the given
        project.  Only supplied values are updated.

        :pparam string organization_slug: the slug of the organization the
                                          project belongs to.
        :pparam string project_slug: the slug of the project to update.
        :param string name: the new name for the project.
        :param string slug: the new slug for the project.
        :param string team: the slug of the new team for the project. Note:
                            editing the team via this endpoint is deprecated.
        :param string platform: the new platform for the project.
        :param boolean isBookmarked: in case this API call is invoked with a
                                     user context this allows changing of
                                     the bookmark flag.
        :param int digestsMinDelay: minimum interval (in seconds) between
                                    digest deliveries.
        :param int digestsMaxDelay: maximum interval (in seconds) a digest
                                    can be delayed before delivery.
        :auth: required
        """
        has_project_write = (
            (request.auth and request.auth.has_scope('project:write'))
            or (request.access and request.access.has_scope('project:write')))

        changed_proj_settings = {}

        if has_project_write:
            serializer_cls = ProjectAdminSerializer
        else:
            serializer_cls = ProjectMemberSerializer

        serializer = serializer_cls(
            data=request.DATA,
            partial=True,
            context={
                'project': project,
                'request': request,
            },
        )
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)

        result = serializer.object

        if not has_project_write:
            # options isn't part of the serializer, but should not be editable by members
            for key in chain(six.iterkeys(ProjectAdminSerializer.base_fields),
                             ['options']):
                if request.DATA.get(key) and not result.get(key):
                    return Response(
                        {
                            'detail': [
                                'You do not have permission to perform this action.'
                            ]
                        },
                        status=403)

        changed = False

        old_slug = None
        if result.get('slug'):
            old_slug = project.slug
            project.slug = result['slug']
            changed = True
            changed_proj_settings['new_slug'] = project.slug

        if result.get('name'):
            project.name = result['name']
            changed = True
            changed_proj_settings['new_project'] = project.name

        old_team_id = None
        new_team = None
        if result.get('team'):
            return Response(
                {
                    'detail':
                    ['Editing a team via this endpoint has been deprecated.']
                },
                status=400)

        if result.get('platform'):
            project.platform = result['platform']
            changed = True

        if changed:
            project.save()
            if old_team_id is not None:
                ProjectTeam.objects.filter(
                    project=project,
                    team_id=old_team_id,
                ).update(team=new_team)

            if old_slug:
                ProjectRedirect.record(project, old_slug)

        if result.get('isBookmarked'):
            try:
                with transaction.atomic():
                    ProjectBookmark.objects.create(
                        project_id=project.id,
                        user=request.user,
                    )
            except IntegrityError:
                pass
        elif result.get('isBookmarked') is False:
            ProjectBookmark.objects.filter(
                project_id=project.id,
                user=request.user,
            ).delete()

        if result.get('digestsMinDelay'):
            project.update_option('digests:mail:minimum_delay',
                                  result['digestsMinDelay'])
        if result.get('digestsMaxDelay'):
            project.update_option('digests:mail:maximum_delay',
                                  result['digestsMaxDelay'])
        if result.get('subjectPrefix') is not None:
            if project.update_option('mail:subject_prefix',
                                     result['subjectPrefix']):
                changed_proj_settings['mail:subject_prefix'] = result[
                    'subjectPrefix']
        if result.get('subjectTemplate'):
            project.update_option('mail:subject_template',
                                  result['subjectTemplate'])
        if result.get('scrubIPAddresses') is not None:
            if project.update_option('sentry:scrub_ip_address',
                                     result['scrubIPAddresses']):
                changed_proj_settings['sentry:scrub_ip_address'] = result[
                    'scrubIPAddresses']
        if result.get('securityToken') is not None:
            if project.update_option('sentry:token', result['securityToken']):
                changed_proj_settings['sentry:token'] = result['securityToken']
        if result.get('securityTokenHeader') is not None:
            if project.update_option('sentry:token_header',
                                     result['securityTokenHeader']):
                changed_proj_settings['sentry:token_header'] = result[
                    'securityTokenHeader']
        if result.get('verifySSL') is not None:
            if project.update_option('sentry:verify_ssl', result['verifySSL']):
                changed_proj_settings['sentry:verify_ssl'] = result[
                    'verifySSL']
        if result.get('dataScrubber') is not None:
            if project.update_option('sentry:scrub_data',
                                     result['dataScrubber']):
                changed_proj_settings['sentry:scrub_data'] = result[
                    'dataScrubber']
        if result.get('dataScrubberDefaults') is not None:
            if project.update_option('sentry:scrub_defaults',
                                     result['dataScrubberDefaults']):
                changed_proj_settings['sentry:scrub_defaults'] = result[
                    'dataScrubberDefaults']
        if result.get('sensitiveFields') is not None:
            if project.update_option('sentry:sensitive_fields',
                                     result['sensitiveFields']):
                changed_proj_settings['sentry:sensitive_fields'] = result[
                    'sensitiveFields']
        if result.get('safeFields') is not None:
            if project.update_option('sentry:safe_fields',
                                     result['safeFields']):
                changed_proj_settings['sentry:safe_fields'] = result[
                    'safeFields']
        if result.get('storeCrashReports') is not None:
            if project.update_option('sentry:store_crash_reports',
                                     result['storeCrashReports']):
                changed_proj_settings['sentry:store_crash_reports'] = result[
                    'storeCrashReports']
        if result.get('relayPiiConfig') is not None:
            if project.update_option('sentry:relay_pii_config',
                                     result['relayPiiConfig']):
                changed_proj_settings['sentry:relay_pii_config'] = result[
                    'relayPiiConfig'].strip() or None
        if 'defaultEnvironment' in result:
            if result['defaultEnvironment'] is None:
                project.delete_option('sentry:default_environment')
            else:
                project.update_option('sentry:default_environment',
                                      result['defaultEnvironment'])
        # resolveAge can be None
        if 'resolveAge' in result:
            if project.update_option(
                    'sentry:resolve_age', 0 if result.get('resolveAge') is None
                    else int(result['resolveAge'])):
                changed_proj_settings['sentry:resolve_age'] = result[
                    'resolveAge']
        if result.get('scrapeJavaScript') is not None:
            if project.update_option('sentry:scrape_javascript',
                                     result['scrapeJavaScript']):
                changed_proj_settings['sentry:scrape_javascript'] = result[
                    'scrapeJavaScript']
        if result.get('allowedDomains'):
            if project.update_option('sentry:origins',
                                     result['allowedDomains']):
                changed_proj_settings['sentry:origins'] = result[
                    'allowedDomains']

        if result.get('isSubscribed'):
            UserOption.objects.set_value(user=request.user,
                                         key='mail:alert',
                                         value=1,
                                         project=project)
        elif result.get('isSubscribed') is False:
            UserOption.objects.set_value(user=request.user,
                                         key='mail:alert',
                                         value=0,
                                         project=project)

        # TODO(dcramer): rewrite options to use standard API config
        if has_project_write:
            options = request.DATA.get('options', {})
            if 'sentry:origins' in options:
                project.update_option(
                    'sentry:origins',
                    clean_newline_inputs(options['sentry:origins']))
            if 'sentry:resolve_age' in options:
                project.update_option('sentry:resolve_age',
                                      int(options['sentry:resolve_age']))
            if 'sentry:scrub_data' in options:
                project.update_option('sentry:scrub_data',
                                      bool(options['sentry:scrub_data']))
            if 'sentry:scrub_defaults' in options:
                project.update_option('sentry:scrub_defaults',
                                      bool(options['sentry:scrub_defaults']))
            if 'sentry:safe_fields' in options:
                project.update_option(
                    'sentry:safe_fields',
                    [s.strip().lower() for s in options['sentry:safe_fields']])
            if 'sentry:store_crash_reports' in options:
                project.update_option(
                    'sentry:store_crash_reports',
                    bool(options['sentry:store_crash_reports']))
            if 'sentry:relay_pii_config' in options:
                project.update_option(
                    'sentry:relay_pii_config',
                    options['sentry:relay_pii_config'].strip() or None)
            if 'sentry:sensitive_fields' in options:
                project.update_option('sentry:sensitive_fields', [
                    s.strip().lower()
                    for s in options['sentry:sensitive_fields']
                ])
            if 'sentry:scrub_ip_address' in options:
                project.update_option(
                    'sentry:scrub_ip_address',
                    bool(options['sentry:scrub_ip_address']),
                )
            if 'mail:subject_prefix' in options:
                project.update_option(
                    'mail:subject_prefix',
                    options['mail:subject_prefix'],
                )
            if 'sentry:default_environment' in options:
                project.update_option(
                    'sentry:default_environment',
                    options['sentry:default_environment'],
                )
            if 'sentry:csp_ignored_sources_defaults' in options:
                project.update_option(
                    'sentry:csp_ignored_sources_defaults',
                    bool(options['sentry:csp_ignored_sources_defaults']))
            if 'sentry:csp_ignored_sources' in options:
                project.update_option(
                    'sentry:csp_ignored_sources',
                    clean_newline_inputs(
                        options['sentry:csp_ignored_sources']))
            if 'sentry:blacklisted_ips' in options:
                project.update_option(
                    'sentry:blacklisted_ips',
                    clean_newline_inputs(options['sentry:blacklisted_ips']),
                )
            if 'feedback:branding' in options:
                project.update_option(
                    'feedback:branding',
                    '1' if options['feedback:branding'] else '0')
            if 'sentry:reprocessing_active' in options:
                project.update_option(
                    'sentry:reprocessing_active',
                    bool(options['sentry:reprocessing_active']))
            if 'filters:blacklisted_ips' in options:
                project.update_option(
                    'sentry:blacklisted_ips',
                    clean_newline_inputs(options['filters:blacklisted_ips']))
            if u'filters:{}'.format(FilterTypes.RELEASES) in options:
                if features.has('projects:custom-inbound-filters',
                                project,
                                actor=request.user):
                    project.update_option(
                        u'sentry:{}'.format(FilterTypes.RELEASES),
                        clean_newline_inputs(options[u'filters:{}'.format(
                            FilterTypes.RELEASES)]))
                else:
                    return Response(
                        {'detail': ['You do not have that feature enabled']},
                        status=400)
            if u'filters:{}'.format(FilterTypes.ERROR_MESSAGES) in options:
                if features.has('projects:custom-inbound-filters',
                                project,
                                actor=request.user):
                    project.update_option(
                        u'sentry:{}'.format(FilterTypes.ERROR_MESSAGES),
                        clean_newline_inputs(options[u'filters:{}'.format(
                            FilterTypes.ERROR_MESSAGES)],
                                             case_insensitive=False))
                else:
                    return Response(
                        {'detail': ['You do not have that feature enabled']},
                        status=400)

            self.create_audit_entry(request=request,
                                    organization=project.organization,
                                    target_object=project.id,
                                    event=AuditLogEntryEvent.PROJECT_EDIT,
                                    data=changed_proj_settings)

        data = serialize(project, request.user, DetailedProjectSerializer())
        return Response(data)
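
An illustrative client call against this endpoint (slugs and token are placeholders; the path assumes Sentry's usual /api/0/projects/{org}/{project}/ details route):

import requests

resp = requests.put(
    "https://sentry.example.com/api/0/projects/my-org/my-project/",
    headers={"Authorization": "Bearer <auth-token>"},
    json={"name": "Renamed Project", "slug": "renamed-project", "digestsMinDelay": 60},
)
resp.raise_for_status()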
Code example #59
def post_process_group(is_new,
                       is_regression,
                       is_new_group_environment,
                       cache_key,
                       group_id=None,
                       **kwargs):
    """
    Fires post processing hooks for a group.
    """
    from sentry.eventstore.models import Event
    from sentry.eventstore.processing import event_processing_store
    from sentry.utils import snuba
    from sentry.reprocessing2 import is_reprocessed_event

    with snuba.options_override({"consistent": True}):
        # We use the data being present/missing in the processing store
        # to ensure that we don't duplicate work should the forwarding consumers
        # need to rewind history.
        data = event_processing_store.get(cache_key)
        if not data:
            logger.info(
                "post_process.skipped",
                extra={
                    "cache_key": cache_key,
                    "reason": "missing_cache"
                },
            )
            return
        event = Event(project_id=data["project"],
                      event_id=data["event_id"],
                      group_id=group_id,
                      data=data)

        set_current_project(event.project_id)

        is_reprocessed = is_reprocessed_event(event.data)

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import (
            Commit,
            Project,
            Organization,
            EventDict,
            GroupInboxReason,
        )
        from sentry.models.groupinbox import add_group_to_inbox
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook
        from sentry.tasks.groupowner import process_suspect_commits

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        # Re-bind Project and Org since we're reading the Event object
        # from cache which may contain stale parent models.
        event.project = Project.objects.get_from_cache(id=event.project_id)
        event.project._organization_cache = Organization.objects.get_from_cache(
            id=event.project.organization_id)

        if event.group_id:
            # Re-bind Group since we're reading the Event object
            # from cache, which may contain a stale group and project
            event.group, _ = get_group_with_redirect(event.group_id)
            event.group_id = event.group.id

            event.group.project = event.project
            event.group.project._organization_cache = event.project._organization_cache

        bind_organization_context(event.project.organization)

        _capture_stats(event, is_new)

        from sentry.reprocessing2 import spawn_capture_nodestore_stats

        spawn_capture_nodestore_stats(cache_key, event.project_id,
                                      event.event_id)

        if event.group_id and is_reprocessed and is_new:
            add_group_to_inbox(event.group, GroupInboxReason.REPROCESSED)

        if event.group_id and not is_reprocessed:
            # We process snoozes before rules because an expiring snooze can
            # surface a regression. We skip this for new groups, since a
            # group cannot have been snoozed before it exists.
            has_reappeared = False if is_new else process_snoozes(event.group)
            if not has_reappeared:  # If true, we added the .UNIGNORED reason already
                if is_new:
                    add_group_to_inbox(event.group, GroupInboxReason.NEW)
                elif is_regression:
                    add_group_to_inbox(event.group,
                                       GroupInboxReason.REGRESSION)

            handle_owner_assignment(event.project, event.group, event)

            rp = RuleProcessor(event, is_new, is_regression,
                               is_new_group_environment, has_reappeared)
            has_alert = False
            # TODO(dcramer): ideally this would fanout, but serializing giant
            # objects back and forth isn't super efficient
            for callback, futures in rp.apply():
                has_alert = True
                with sentry_sdk.start_transaction(op="post_process_group",
                                                  name="rule_processor_apply",
                                                  sampled=True):
                    safe_execute(callback,
                                 event,
                                 futures,
                                 _with_transaction=False)

            try:
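                # The cryptic lock key appears to be a short-lived,
                # per-group debounce so concurrent events don't repeat the
                # suspect-commit check below (an inference, not documented).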
                lock = locks.get(
                    "w-o:{}-d-l".format(event.group_id),
                    duration=10,
                )
                try:
                    with lock.acquire():
                        has_commit_key = "w-o:{}-h-c".format(
                            event.project.organization_id)
                        org_has_commit = cache.get(has_commit_key)
                        if org_has_commit is None:
                            org_has_commit = Commit.objects.filter(
                                organization_id=event.project.organization_id
                            ).exists()
                            cache.set(has_commit_key, org_has_commit, 3600)

                        if org_has_commit and features.has(
                                "organizations:workflow-owners",
                                event.project.organization,
                        ):
                            process_suspect_commits(event=event)
                except UnableToAcquireLock:
                    pass
            except Exception:
                logger.exception("Failed to process suspect commits")

            if features.has("projects:servicehooks", project=event.project):
                allowed_events = set(["event.created"])
                if has_alert:
                    allowed_events.add("event.alert")

                if allowed_events:
                    for servicehook_id, events in _get_service_hooks(
                            project_id=event.project_id):
                        if any(e in allowed_events for e in events):
                            process_service_hook.delay(
                                servicehook_id=servicehook_id, event=event)

            from sentry.tasks.sentry_apps import process_resource_change_bound

            if event.get_event_type() == "error" and _should_send_error_created_hooks(event.project):
                process_resource_change_bound.delay(action="created",
                                                    sender="Error",
                                                    instance_id=event.event_id,
                                                    instance=event)
            if is_new:
                process_resource_change_bound.delay(action="created",
                                                    sender="Group",
                                                    instance_id=event.group_id)

            from sentry.plugins.base import plugins

            for plugin in plugins.for_project(event.project):
                plugin_post_process_group(
                    plugin_slug=plugin.slug,
                    event=event,
                    is_new=is_new,
                    is_regression=is_regression,
                )

            from sentry import similarity

            safe_execute(similarity.record,
                         event.project, [event],
                         _with_transaction=False)

        if event.group_id:
            # Patch attachments that were ingested on the standalone path.
            update_existing_attachments(event)

        if not is_reprocessed:
            event_processed.send_robust(
                sender=post_process_group,
                project=event.project,
                event=event,
                primary_hash=kwargs.get("primary_hash"),
            )

        with metrics.timer("tasks.post_process.delete_event_cache"):
            event_processing_store.delete_by_key(cache_key)
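
Taken together with code example #52, the processing store used here has a small implied contract; a sketch of that interface as inferred from the calls above (not Sentry's actual class):

class EventProcessingStore:
    def store(self, data):
        """Persist event data and return a cache key."""

    def get(self, cache_key):
        """Return the stored data, or None if missing/expired."""

    def delete_by_key(self, cache_key):
        """Drop the cached data once post-processing is done."""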
Code example #60
    def get_organization_config(self):
        client = self.get_client()
        instance = self.model.metadata['domain_name']

        project_selector = []

        try:
            projects = client.get_projects(instance)['value']
            all_states = set()

            for idx, project in enumerate(projects):
                project_selector.append({'value': project['id'], 'label': project['name']})
                # only request states for the first 5 projects to limit the
                # number of requests
                if idx < 5:
                    project_states = client.get_work_item_states(instance, project['id'])['value']
                    for state in project_states:
                        all_states.add(state['name'])

            all_states = [(state, state) for state in all_states]
            disabled = False

        except ApiError:
            all_states = []
            disabled = True

        fields = [
            {
                'name': self.outbound_status_key,
                'type': 'choice_mapper',
                'disabled': disabled,
                'label': _('Sync Sentry Status to Azure DevOps'),
                'help': _('When a Sentry issue changes status, change the status of the linked work item in Azure DevOps.'),
                'addButtonText': _('Add Azure DevOps Project'),
                'addDropdown': {
                    'emptyMessage': _('All projects configured'),
                    'noResultsMessage': _('Could not find Azure DevOps project'),
                    'items': project_selector,
                },
                'mappedSelectors': {
                    'on_resolve': {'choices': all_states, 'placeholder': _('Select a status')},
                    'on_unresolve': {'choices': all_states, 'placeholder': _('Select a status')},
                },
                'columnLabels': {
                    'on_resolve': _('When resolved'),
                    'on_unresolve': _('When unresolved'),
                },
                'mappedColumnLabel': _('Azure DevOps Project'),
            },
            {
                'name': self.outbound_assignee_key,
                'type': 'boolean',
                'label': _('Sync Sentry Assignment to Azure DevOps'),
                'help': _('When an issue is assigned in Sentry, assign its linked Azure DevOps work item to the same user.'),
            },
            {
                'name': self.comment_key,
                'type': 'boolean',
                'label': _('Sync Sentry Comments to Azure DevOps'),
                'help': _('Post comments from Sentry issues to linked Azure DevOps work items.'),
            },
            {
                'name': self.inbound_status_key,
                'type': 'boolean',
                'label': _('Sync Azure DevOps Status to Sentry'),
                'help': _('When an Azure DevOps work item is marked done, resolve its linked issue in Sentry. '
                          'When an Azure DevOps work item is no longer marked done, unresolve its linked Sentry issue.'
                          ),
            },
            {
                'name': self.inbound_assignee_key,
                'type': 'boolean',
                'label': _('Sync Azure DevOps Assignment to Sentry'),
                'help': _('When a work item is assigned in Azure DevOps, assign its linked Sentry issue to the same user.'),
            },
        ]

        organization = Organization.objects.get(id=self.organization_id)
        has_issue_sync = features.has('organizations:integrations-issue-sync',
                                      organization)
        if not has_issue_sync:
            for field in fields:
                field['disabled'] = True
                field['disabledReason'] = _(
                    'Your organization does not have access to this feature'
                )

        return fields