def update_organization_config(self, data):
    if "service_table" in data:
        service_rows = data["service_table"]
        # validate fields
        bad_rows = [x for x in service_rows if not x["service"] or not x["integration_key"]]
        if bad_rows:
            raise IntegrationError("Name and key are required")

        with transaction.atomic():
            existing_service_items = PagerDutyService.objects.filter(
                organization_integration=self.org_integration
            )
            for service_item in existing_service_items:
                # find the matching row from the input
                matched_rows = [x for x in service_rows if x["id"] == service_item.id]
                if matched_rows:
                    matched_row = matched_rows[0]
                    service_item.integration_key = matched_row["integration_key"]
                    service_item.service_name = matched_row["service"]
                    service_item.save()
                else:
                    service_item.delete()

            # new rows don't have an id
            new_rows = [x for x in service_rows if not x["id"]]
            for row in new_rows:
                service_name = row["service"]
                key = row["integration_key"]
                PagerDutyService.objects.create(
                    organization_integration=self.org_integration,
                    service_name=service_name,
                    integration_key=key,
                )
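# Why the list conversions above (and throughout this file) matter: on Python 3,
# filter() returns a lazy iterator instead of a list, so truthiness checks,
# len(), and indexing no longer behave the way the Python 2 code assumed.
# A minimal standalone sketch of the pitfall; the data here is illustrative only:
def _filter_pitfall_demo():
    rows = [{"id": 1}, {"id": 2}]
    matches = filter(lambda x: x["id"] == 3, rows)
    assert bool(matches)  # a filter object is always truthy, even with no matches
    assert list(matches) == []  # materializing it reveals it was actually empty
    # hence the list comprehensions above, which support `if`, len(), and [0]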
def get(self, request, group):
    limit = request.GET.get("limit", None)
    if limit is not None:
        limit = int(limit) + 1  # the target group will always be included

    # materialize the results so they can be consumed more than once below
    results = [
        (group_id, scores)
        for group_id, scores in features.compare(group, limit=limit)
        if group_id != group.id
    ]

    serialized_groups = apply_values(
        functools.partial(serialize, user=request.user),
        Group.objects.in_bulk([group_id for group_id, scores in results]),
    )

    # TODO(tkaemming): This should log when we filter out a group that is
    # unable to be retrieved from the database. (This will soon be
    # unexpected behavior, but still possible.)
    return Response(
        [
            (serialized_groups.get(group_id), scores)
            for group_id, scores in results
            if serialized_groups.get(group_id) is not None
        ]
    )
def find_measurements_min_max(measurements, min_value, max_value, user_query, params):
    """
    Find the min/max value of the specified measurements. If either min/max is
    already specified, it will be used and not queried for.

    :param [str] measurements: The list of measurements for which you want to
        generate histograms.
    :param float min_value: The minimum value allowed to be in the histogram.
        If left unspecified, it is queried using `user_query` and `params`.
    :param float max_value: The maximum value allowed to be in the histogram.
        If left unspecified, it is queried using `user_query` and `params`.
    :param str user_query: Filter query string to create conditions from.
    :param {str: str} params: Filtering parameters with start, end, project_id, environment
    """
    if min_value is not None and max_value is not None:
        return min_value, max_value

    min_columns, max_columns = [], []
    for measurement in measurements:
        if min_value is None:
            min_columns.append("min(measurements.{})".format(measurement))
        if max_value is None:
            max_columns.append("max(measurements.{})".format(measurement))

    results = query(
        selected_columns=min_columns + max_columns,
        query=user_query,
        params=params,
        limit=1,
        referrer="api.organization-events-measurements-min-max",
        auto_fields=True,
        use_aggregate_conditions=True,
    )

    data = results.get("data")

    # there should be exactly 1 row in the results, but if something went wrong here,
    # we force the min/max to be None to coerce an empty histogram
    if data is None or len(data) != 1:
        return None, None

    row = data[0]

    if min_value is None:
        min_values = [row[get_function_alias(column)] for column in min_columns]
        min_values = list(filter(lambda v: v is not None, min_values))
        min_value = min(min_values) if min_values else None

    if max_value is None:
        max_values = [row[get_function_alias(column)] for column in max_columns]
        max_values = list(filter(lambda v: v is not None, max_values))
        max_value = max(max_values) if max_values else None

    return min_value, max_value
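# A hedged usage sketch for find_measurements_min_max(); the measurement names
# and params below are illustrative only:
#
#     params = {"start": start, "end": end, "project_id": [project.id]}
#     min_val, max_val = find_measurements_min_max(["fcp", "lcp"], None, None, "", params)
#
# When both bounds are pre-specified, no query is issued at all:
#
#     assert find_measurements_min_max([], 0.0, 10.0, "", {}) == (0.0, 10.0)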
def test_get(self):
    project = self.create_project()

    issues = plugins.get("issuetrackingplugin2")
    with patch.object(issues, "is_hidden", return_value=True):
        self.login_as(user=self.user)

        url = reverse(
            "sentry-api-0-project-plugins",
            kwargs={
                "organization_slug": project.organization.slug,
                "project_slug": project.slug,
            },
        )
        response = self.client.get(url)

        assert response.status_code == 200, (response.status_code, response.content)
        assert len(response.data) >= 9

        auto_tag = [p for p in response.data if p["slug"] == "browsers"][0]
        assert auto_tag["name"] == "Auto Tag: Browsers"
        assert auto_tag["enabled"] is True
        assert auto_tag["isHidden"] is False
        self.assert_plugin_shape(auto_tag)

        issues = [p for p in response.data if p["slug"] == "issuetrackingplugin2"][0]
        assert issues["name"] == "IssueTrackingPlugin2"
        assert issues["enabled"] is False
        assert issues["isHidden"] is True
        self.assert_plugin_shape(issues)
def serialize(self, obj, attrs, user, **kwargs):
    environment = attrs["environment"]
    all_conditions = [
        dict(list(o.items()) + [("name", _generate_rule_label(obj.project, obj, o))])
        for o in obj.data.get("conditions", [])
    ]

    d = {
        # XXX(dcramer): we currently serialize unsaved rule objects
        # as part of the rule editor
        "id": str(obj.id) if obj.id else None,
        # conditions pertain to criteria that can trigger an alert
        "conditions": [condition for condition in all_conditions if not _is_filter(condition)],
        # filters are not new conditions but are the subset of conditions that pertain to event attributes
        "filters": [condition for condition in all_conditions if _is_filter(condition)],
        "actions": [
            dict(list(o.items()) + [("name", _generate_rule_label(obj.project, obj, o))])
            for o in obj.data.get("actions", [])
        ],
        "actionMatch": obj.data.get("action_match") or Rule.DEFAULT_CONDITION_MATCH,
        "filterMatch": obj.data.get("filter_match") or Rule.DEFAULT_FILTER_MATCH,
        "frequency": obj.data.get("frequency") or Rule.DEFAULT_FREQUENCY,
        "name": obj.label,
        "dateCreated": obj.date_added,
        "owner": attrs.get("owner", None),
        "createdBy": attrs.get("created_by", None),
        "environment": environment.name if environment is not None else None,
        "projects": [obj.project.slug],
    }
    if "last_triggered" in attrs:
        d["lastTriggered"] = attrs["last_triggered"]
    return d
def resolve_many(cls, actors: Sequence["ActorTuple"]) -> Sequence[Union["Team", "User"]]:
    """
    Resolve multiple actors at the same time. Returns the result in the same
    order as the input, minus any actors we couldn't resolve.

    :param actors: the actor tuples to resolve
    :return: the resolved ``Team``/``User`` instances
    """
    if not actors:
        return []

    actors_by_type = defaultdict(list)
    for actor in actors:
        actors_by_type[actor.type].append(actor)

    results = {}
    for type, _actors in actors_by_type.items():
        for instance in type.objects.filter(id__in=[a.id for a in _actors]):
            results[(type, instance.id)] = instance

    return list(filter(None, [results.get((actor.type, actor.id)) for actor in actors]))
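# A hedged usage sketch for resolve_many(); the ids below are hypothetical and
# assume ActorTuple carries (id, type) with type being the Team or User model,
# as the grouping on actor.type above implies:
#
#     actors = [ActorTuple(id=1, type=User), ActorTuple(id=5, type=Team)]
#     resolved = ActorTuple.resolve_many(actors)
#     # -> [<User: 1>, <Team: 5>]; input order preserved, unresolvable ids dropped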
def process_request(self, request):
    # Our health check can't be done as a view, because we need
    # to bypass the ALLOWED_HOSTS check. We need to do this
    # since not all load balancers can send the expected Host header
    # which would cause a 400 BAD REQUEST, marking the node dead.
    # Instead, we just intercept the request at this point, and return
    # our success/failure immediately.
    if request.path != "/_health/":
        return

    if "full" not in request.GET:
        return HttpResponse("ok", content_type="text/plain")

    from sentry.status_checks import Problem, check_all
    from sentry.utils import json

    threshold = Problem.threshold(Problem.SEVERITY_CRITICAL)
    # materialize each filter so the results can be consumed twice below
    results = {
        check: list(filter(threshold, problems)) for check, problems in check_all().items()
    }

    problems = list(itertools.chain.from_iterable(results.values()))

    return HttpResponse(
        json.dumps(
            {
                "problems": [six.text_type(p) for p in problems],
                "healthy": {type(check).__name__: not p for check, p in results.items()},
            }
        ),
        content_type="application/json",
        status=(500 if problems else 200),
    )
def get_group_seen_values_for_environments(
    self, project_ids, group_id_list, environment_ids, snuba_filters, start=None, end=None
):
    # Get the total times seen, first seen, and last seen across multiple environments
    filters = {"project_id": project_ids, "group_id": group_id_list}
    if environment_ids:
        filters["environment"] = environment_ids

    aggregations = [
        ["count()", "", "times_seen"],
        ["min", SEEN_COLUMN, "first_seen"],
        ["max", SEEN_COLUMN, "last_seen"],
    ]

    result = snuba.aliased_query(
        dataset=snuba.Dataset.Events,
        start=start,
        end=end,
        groupby=["group_id"],
        conditions=snuba_filters,
        filter_keys=filters,
        aggregations=aggregations,
        referrer="tagstore.get_group_seen_values_for_environments",
    )

    return {
        issue["group_id"]: fix_tag_value_data(
            dict(filter(lambda key: key[0] != "group_id", six.iteritems(issue)))
        )
        for issue in result["data"]
    }
def get(self, request, organization, integration_id):
    try:
        integration = self._get_integration(organization, integration_id)
    except Integration.DoesNotExist:
        return Response(status=404)
    installation = integration.get_installation(organization.id)
    jira_client = installation.get_client()

    field = request.GET.get("field")
    query = request.GET.get("query")

    if field is None:
        return Response({"detail": "field is a required parameter"}, status=400)
    if not query:
        return Response({"detail": "query is a required parameter"}, status=400)

    if field in ("externalIssue", "parent"):
        if not query:
            return Response([])
        try:
            resp = installation.search_issues(query)
        except IntegrationError as e:
            return Response({"detail": str(e)}, status=400)
        return Response(
            [
                {"label": "(%s) %s" % (i["key"], i["fields"]["summary"]), "value": i["key"]}
                for i in resp.get("issues", [])
            ]
        )

    if field in ("assignee", "reporter"):
        try:
            response = jira_client.search_users_for_project(
                request.GET.get("project", ""), query
            )
        except (ApiUnauthorized, ApiError):
            return Response({"detail": "Unable to fetch users from Jira"}, status=400)
        user_tuples = filter(
            None, [build_user_choice(user, jira_client.user_id_field()) for user in response]
        )
        users = [{"value": user_id, "label": display} for user_id, display in user_tuples]
        return Response(users)

    try:
        response = jira_client.get_field_autocomplete(name=field, value=query)
    except (ApiUnauthorized, ApiError):
        return Response(
            {"detail": "Unable to fetch autocomplete for {} from Jira".format(field)},
            status=400,
        )
    choices = [
        {
            "value": result["value"],
            # Jira's response will highlight the matching substring in the name using HTML formatting.
            "label": BeautifulSoup(result["displayName"], "html.parser").get_text(),
        }
        for result in response["results"]
    ]
    return Response(choices)
def handle_member_added(self, request):
    data = request.data
    channel_data = data["channelData"]
    # only care if our bot is the new member added
    matches = [x for x in data["membersAdded"] if x["id"] == data["recipient"]["id"]]
    if not matches:
        return self.respond(status=204)

    team = channel_data["team"]

    # TODO: add try/except for request exceptions
    access_token = get_token_data()["access_token"]

    # need to keep track of the service url since we won't get it later
    signed_data = {
        "team_id": team["id"],
        "team_name": team["name"],
        "service_url": data["serviceUrl"],
        "expiration_time": int(time.time()) + INSTALL_EXPIRATION_TIME,
    }

    # sign the params so this can't be forged
    signed_params = sign(**signed_data)

    # send welcome message to the team
    client = MsTeamsPreInstallClient(access_token, data["serviceUrl"])
    card = build_welcome_card(signed_params)
    client.send_card(team["id"], card)
    return self.respond(status=201)
def get(self, request, organization):
    def is_provider_enabled(provider):
        if not provider.requires_feature_flag:
            return True
        feature_flag_name = "organizations:integrations-%s" % provider.key
        return features.has(feature_flag_name, organization, actor=request.user)

    # materialize a list so the result can be sorted in place
    providers = [p for p in integrations.all() if is_provider_enabled(p)]
    providers.sort(key=lambda i: i.key)

    serialized = serialize(
        providers, organization=organization, serializer=IntegrationProviderSerializer()
    )

    if "provider_key" in request.GET:
        serialized = [d for d in serialized if d["key"] == request.GET["provider_key"]]

    if not serialized:
        return Response({"detail": "Providers do not exist"}, status=404)

    return Response({"providers": serialized})
def post(self, request):
    # verify_signature will raise the exception corresponding to the error
    verify_signature(request)

    data = request.data
    channel_data = data["channelData"]
    event = channel_data.get("eventType")
    # TODO: Handle other events
    if event == "teamMemberAdded":
        # only care if our bot is the new member added
        matches = [x for x in data["membersAdded"] if x["id"] == data["recipient"]["id"]]
        if matches:
            team_id = channel_data["team"]["id"]

            access_token = get_token_data()["access_token"]

            # need to keep track of the service url since we won't get it later
            signed_data = {"team_id": team_id, "service_url": data["serviceUrl"]}

            # sign the params so this can't be forged
            signed_params = sign(**signed_data)

            # send welcome message to the team
            client = MsTeamsPreInstallClient(access_token, data["serviceUrl"])
            client.send_welcome_message(team_id, signed_params)

    return self.respond(status=200)
def dispatch(self, request, pipeline):
    # the previous pipeline step will have been a POST that lands here,
    # so we need to check our state to determine what to do
    if request.method == "POST" and pipeline.fetch_state("ready_for_enabled_lambdas_post"):
        # accept form data or json data
        data = request.POST or json.loads(request.body)
        pipeline.bind_state("enabled_lambdas", data)
        return pipeline.next_step()

    # bind the state now so we are ready to accept the enabled_lambdas in the post
    pipeline.bind_state("ready_for_enabled_lambdas_post", True)

    arn = pipeline.fetch_state("arn")
    aws_external_id = pipeline.fetch_state("aws_external_id")

    lambda_client = gen_aws_client(arn, aws_external_id)

    # materialize the list so it can be serialized for the React view
    lambda_functions = [
        fn
        for fn in lambda_client.list_functions()["Functions"]
        if fn.get("Runtime") in SUPPORTED_RUNTIMES
    ]

    return self.render_react_view(
        request, "awsLambdaFunctionSelect", {"lambdaFunctions": lambda_functions}
    )
def post(self, request):
    is_valid = verify_signature(request)

    if not is_valid:
        logger.error("msteams.webhook.invalid-signature")
        return self.respond(status=401)

    data = request.data
    channel_data = data["channelData"]
    event = channel_data.get("eventType")
    # TODO: Handle other events
    if event == "teamMemberAdded":
        # only care if our bot is the new member added
        matches = [x for x in data["membersAdded"] if x["id"] == data["recipient"]["id"]]
        if matches:
            # send welcome message to the team
            team_id = channel_data["team"]["id"]
            client = MsTeamsClient()
            # sign the params so this can't be forged
            signed_params = sign(team_id=team_id)
            url = u"%s?signed_params=%s" % (
                absolute_uri("/extensions/msteams/configure/"),
                signed_params,
            )
            # TODO: Better message
            payload = {"type": "message", "text": url}
            client.send_message(team_id, payload)

    return self.respond(status=200)
def handle_team_member_added(self, request):
    data = request.data
    channel_data = data["channelData"]
    # only care if our bot is the new member added
    matches = [x for x in data["membersAdded"] if x["id"] == data["recipient"]["id"]]
    if not matches:
        return self.respond(status=204)

    team = channel_data["team"]

    # need to keep track of the service url since we won't get it later
    signed_data = {
        "team_id": team["id"],
        "team_name": team["name"],
        "service_url": data["serviceUrl"],
    }

    # sign the params so this can't be forged
    signed_params = sign(**signed_data)

    # send welcome message to the team
    client = get_preinstall_client(data["serviceUrl"])
    card = build_welcome_card(signed_params)
    client.send_card(team["id"], card)
    return self.respond(status=201)
def validate_allowedDomains(self, value):
    # materialize the filter so the result has a len()
    value = list(filter(bool, value))
    if len(value) == 0:
        raise serializers.ValidationError(
            "Empty value will block all requests, use * to accept from all domains"
        )
    return value
def get_unmigratable_repositories(self):
    repos = Repository.objects.filter(
        organization_id=self.organization_id, provider="bitbucket_server"
    )
    accessible_repos = [r["identifier"] for r in self.get_repositories()]
    # return a concrete list rather than a lazy filter object
    return [repo for repo in repos if repo.name not in accessible_repos]
def get(self, request, organization):
    integrations = OrganizationIntegration.objects.filter(
        organization=organization, status=ObjectStatus.VISIBLE
    )
    if "provider_key" in request.GET:
        integrations = integrations.filter(integration__provider=request.GET["provider_key"])

    # XXX(meredith): Filter out workspace apps if there are any.
    if not features.has(
        "organizations:slack-allow-workspace", organization=organization, actor=request.user
    ):
        slack_integrations = integrations.filter(integration__provider="slack")
        workspace_ids = [
            workspace_app.id
            for workspace_app in slack_integrations
            if get_integration_type(workspace_app.integration) == "workspace_app"
        ]
        integrations = integrations.exclude(id__in=workspace_ids)

    # include the configurations by default if no param
    include_config = True
    if request.GET.get("includeConfig") == "0":
        include_config = False

    return self.paginate(
        queryset=integrations,
        request=request,
        order_by="integration__name",
        on_results=lambda x: serialize(x, request.user, include_config=include_config),
        paginator_cls=OffsetPaginator,
    )
def get_path(data, *path, **kwargs):
    """
    Safely resolves data from a recursive data structure. A value is only
    returned if the full path exists, otherwise ``None`` is returned.

    If the ``default`` argument is specified, it is returned instead of ``None``.

    If the ``filter`` argument is specified and the value is a list, it is
    filtered with the given callback. Alternatively, pass ``True`` as filter to
    only filter ``None`` values.
    """
    default = kwargs.pop("default", None)
    f = kwargs.pop("filter", None)
    for k in kwargs:
        raise TypeError("get_path() got an undefined keyword argument '%s'" % k)

    for p in path:
        if isinstance(data, Mapping) and p in data:
            data = data[p]
        elif isinstance(data, (list, tuple)) and isinstance(p, int) and -len(data) <= p < len(data):
            data = data[p]
        else:
            return default

    if f and data and isinstance(data, (list, tuple)):
        # materialize the filter so callers always get a list back
        data = list(filter((lambda x: x is not None) if f is True else f, data))

    return data if data is not None else default
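# A short usage sketch for get_path(); the event dict is illustrative only:
#
#     event = {"exception": {"values": [{"type": "ValueError"}, None]}}
#     get_path(event, "exception", "values", 0, "type")    # -> "ValueError"
#     get_path(event, "exception", "frames", default=[])   # -> [] (path missing)
#     get_path(event, "exception", "values", filter=True)  # -> [{"type": "ValueError"}]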
def get(self, request: Request, project) -> Response:
    """
    Retrieve a Project
    ``````````````````

    Return details on an individual project.

    :pparam string organization_slug: the slug of the organization the
                                      project belongs to.
    :pparam string project_slug: the slug of the project to retrieve.
    :auth: required
    """
    data = serialize(project, request.user, DetailedProjectSerializer())

    # TODO: should switch to expand and move logic into the serializer
    include = set(filter(bool, request.GET.get("include", "").split(",")))
    if "stats" in include:
        data["stats"] = {"unresolved": self._get_unresolved_count(project)}

    expand = request.GET.getlist("expand", [])
    if "hasAlertIntegration" in expand:
        data["hasAlertIntegrationInstalled"] = has_alert_integration(project)

    return Response(data)
def deliver_organization_user_report(timestamp, duration, organization_id, user_id, dry_run=False):
    try:
        organization = _get_organization_queryset().get(id=organization_id)
    except Organization.DoesNotExist:
        logger.warning(
            "reports.organization.missing",
            extra={
                "timestamp": timestamp,
                "duration": duration,
                "organization_id": organization_id,
            },
        )
        return

    user = User.objects.get(id=user_id)

    if not user_subscribed_to_organization_reports(user, organization):
        logger.debug(
            "Skipping report for %r to %r, user is not subscribed to reports.",
            organization,
            user,
        )
        return Skipped.NotSubscribed

    projects = set()
    for team in Team.objects.get_for_user(organization, user):
        projects.update(Project.objects.get_for_user(team, user, _skip_team_check=True))

    if not projects:
        logger.debug(
            "Skipping report for %r to %r, user is not associated with any projects.",
            organization,
            user,
        )
        return Skipped.NoProjects

    interval = _to_interval(timestamp, duration)
    projects = list(projects)

    inclusion_predicates = [
        lambda interval, project__report: project__report[1] is not None,
        has_valid_aggregates,
    ]

    reports = dict(
        filter(
            lambda item: all(predicate(interval, item) for predicate in inclusion_predicates),
            zip(projects, backend.fetch(timestamp, duration, organization, projects)),
        )
    )

    if not reports:
        logger.debug(
            "Skipping report for %r to %r, no qualifying reports to deliver.",
            organization,
            user,
        )
        return Skipped.NoReports

    message = build_message(timestamp, duration, organization, user, reports)

    if not dry_run:
        message.send()
def get_supported_functions(lambda_client):
    paginator = lambda_client.get_paginator("list_functions")
    response_iterator = paginator.paginate()
    functions = []
    for page in response_iterator:
        functions += page["Functions"]

    # return a concrete list rather than a lazy filter object
    return [fn for fn in functions if fn.get("Runtime") in SUPPORTED_RUNTIMES]
def get(self, request, project):
    # should probably feature gate
    filepath = request.GET.get("file")
    if not filepath:
        return Response({"detail": "Filepath is required"}, status=400)

    commitId = request.GET.get("commitId")
    platform = request.GET.get("platform")

    result = {"config": None, "sourceUrl": None}

    integrations = Integration.objects.filter(organizations=project.organization_id)
    # TODO(meredith): should use get_provider.has_feature() instead once this is
    # no longer feature gated and is added as an IntegrationFeature
    result["integrations"] = [
        serialize(i, request.user)
        for i in integrations
        if i.get_provider().has_stacktrace_linking
    ]

    # xxx(meredith): if there are ever any changes to this query, make
    # sure that we are still ordering by `id` because we want to make sure
    # the ordering is deterministic
    configs = RepositoryProjectPathConfig.objects.filter(project=project)

    with configure_scope() as scope:
        for config in configs:
            result["config"] = serialize(config, request.user)
            # use the provider key to be able to split up stacktrace
            # link metrics by integration type
            provider = result["config"]["provider"]["key"]
            scope.set_tag("integration_provider", provider)
            scope.set_tag("stacktrace_link.platform", platform)

            if not filepath.startswith(config.stack_root):
                scope.set_tag("stacktrace_link.error", "stack_root_mismatch")
                result["error"] = "stack_root_mismatch"
                continue

            link = get_link(config, filepath, config.default_branch, commitId)

            # it's possible for the link to be None, and in that
            # case it means we could not find a match for the
            # configuration
            result["sourceUrl"] = link
            if not link:
                scope.set_tag("stacktrace_link.found", False)
                scope.set_tag("stacktrace_link.error", "file_not_found")
                result["error"] = "file_not_found"
            else:
                scope.set_tag("stacktrace_link.found", True)
                # if we found a match, we can break
                break

    return Response(result)
def test_simple(self):
    self.login_as(user=self.user)

    org = self.create_organization(owner=self.user, name="baz")

    url = reverse("sentry-api-0-organization-config-repositories", args=[org.slug])
    response = self.client.get(url, format="json")

    assert response.status_code == 200, response.content
    provider = [x for x in response.data["providers"] if x["id"] == "dummy"][0]
    assert provider["name"] == "Example"
    assert provider["config"]
def handle_personal_member_add(self, request):
    data = request.data
    # only care if our bot is the new member added
    matches = [x for x in data["membersAdded"] if x["id"] == data["recipient"]["id"]]
    if not matches:
        return self.respond(status=204)

    client = get_preinstall_client(data["serviceUrl"])

    user_conversation_id = data["conversation"]["id"]
    card = build_personal_installation_message()
    client.send_card(user_conversation_id, card)
    return self.respond(status=204)
def get_channel_id(organization, integration_id, name):
    try:
        integration = Integration.objects.get(
            provider="msteams", organizations=organization, id=integration_id
        )
    except Integration.DoesNotExist:
        return None

    team_id = integration.external_id
    client = MsTeamsClient(integration)

    # handle searching for channels first
    channel_list = client.get_channel_list(team_id)
    filtered_channels = list(filter(lambda x: channel_filter(x, name), channel_list))
    if len(filtered_channels) > 0:
        return filtered_channels[0].get("id")

    # handle searching for users
    members = client.get_member_list(team_id, None)
    for i in range(MSTEAMS_MAX_ITERS):
        member_list = members.get("members")
        continuation_token = members.get("continuationToken")

        filtered_members = list(
            filter(lambda x: x.get("name").lower() == name.lower(), member_list)
        )
        if len(filtered_members) > 0:
            # TODO: handle duplicate username case
            user_id = filtered_members[0].get("id")
            tenant_id = filtered_members[0].get("tenantId")
            return client.get_user_conversation_id(user_id, tenant_id)

        if not continuation_token:
            return None

        members = client.get_member_list(team_id, continuation_token)

    return None
def dispatch(self, request, pipeline):
    organization = pipeline.organization

    # TODO: make project selection part of flow
    project = Project.objects.filter(
        organization=organization, platform="node-awslambda"
    ).first()
    if not project:
        raise IntegrationError("No valid project")

    enabled_dsn = ProjectKey.get_default(project=project)
    if not enabled_dsn:
        raise IntegrationError("Project does not have DSN enabled")
    sentry_project_dsn = enabled_dsn.get_dsn(public=True)

    arn = pipeline.fetch_state("arn")
    aws_external_id = pipeline.fetch_state("aws_external_id")

    lambda_client = gen_aws_lambda_client(arn, aws_external_id)

    lambda_functions = filter(
        lambda x: x.get("Runtime") in SUPPORTED_RUNTIMES,
        lambda_client.list_functions()["Functions"],
    )

    for function in lambda_functions:
        name = function["FunctionName"]
        # TODO: load existing layers and environment and append to them
        try:
            lambda_client.update_function_configuration(
                FunctionName=name,
                Layers=[options.get("aws-lambda.node-layer-arn")],
                Environment={
                    "Variables": {
                        "NODE_OPTIONS": "-r @sentry/serverless/dist/auto",
                        "SENTRY_DSN": sentry_project_dsn,
                        "SENTRY_TRACES_SAMPLE_RATE": "1.0",
                    }
                },
            )
        except Exception as e:
            logger.info(
                "update_function_configuration.error",
                extra={
                    "organization_id": organization.id,
                    "lambda_name": name,
                    "arn": arn,
                    "error": six.text_type(e),
                },
            )

    return pipeline.next_step()
def build_key_transactions_ctx(key_events, organization, projects):
    # TODO: use projects arg?
    # Fetch projects
    project_id_to_project = {}
    for project in projects:
        project_id_to_project[project.id] = project

    return [
        {
            "name": e[0],
            "count": e[1],
            "project": project_id_to_project[e[2]],
            "p95": e[3],
            "p95_prev_week": e[4],
        }
        for e in key_events
        if e[2] in project_id_to_project
    ]
def build_key_errors_ctx(key_events, organization):
    # Join with DB
    groups = Group.objects.filter(id__in=[e[0] for e in key_events]).all()

    group_id_to_group_history = defaultdict(lambda: (GroupHistoryStatus.NEW, "New Issue"))
    group_history = (
        GroupHistory.objects.filter(
            group__id__in=[e[0] for e in key_events], organization=organization
        )
        .order_by("date_added")
        .all()
    )
    # The order_by ensures that group_id_to_group_history contains the latest GroupHistory entry
    for g in group_history:
        group_id_to_group_history[g.group.id] = (g.status, g.get_status_display())

    group_id_to_group = {}
    for group in groups:
        group_id_to_group[group.id] = group

    status_to_color = {
        GroupHistoryStatus.UNRESOLVED: "#FAD473",
        GroupHistoryStatus.RESOLVED: "#8ACBBC",
        GroupHistoryStatus.SET_RESOLVED_IN_RELEASE: "#8ACBBC",
        GroupHistoryStatus.SET_RESOLVED_IN_COMMIT: "#8ACBBC",
        GroupHistoryStatus.SET_RESOLVED_IN_PULL_REQUEST: "#8ACBBC",
        GroupHistoryStatus.AUTO_RESOLVED: "#8ACBBC",
        GroupHistoryStatus.IGNORED: "#DBD6E1",
        GroupHistoryStatus.UNIGNORED: "#FAD473",
        GroupHistoryStatus.ASSIGNED: "#FAAAAC",
        GroupHistoryStatus.UNASSIGNED: "#FAD473",
        GroupHistoryStatus.REGRESSED: "#FAAAAC",
        GroupHistoryStatus.DELETED: "#DBD6E1",
        GroupHistoryStatus.DELETED_AND_DISCARDED: "#DBD6E1",
        GroupHistoryStatus.REVIEWED: "#FAD473",
        GroupHistoryStatus.NEW: "#FAD473",
    }

    return [
        {
            "group": group_id_to_group[e[0]],
            "count": e[1],
            # For new issues, group history would be None and we default to Unresolved
            "status": group_id_to_group_history[e[0]][1],
            "status_color": status_to_color.get(group_id_to_group_history[e[0]][0], "#DBD6E1"),
        }
        for e in key_events
        if e[0] in group_id_to_group
    ]
def validate_source_url(self, source_url):
    # first check to see if we are even looking at the same file
    stack_path = self.initial_data["stack_path"]

    stack_file = stack_path.split("/")[-1]
    source_file = source_url.split("/")[-1]

    if stack_file != source_file:
        raise serializers.ValidationError(
            "Source code URL points to a different file than the stack trace"
        )

    def integration_match(integration):
        return source_url.startswith("https://{}".format(integration.metadata["domain_name"]))

    def repo_match(repo):
        return source_url.startswith(repo.url)

    # now find the matching integration
    integrations = Integration.objects.filter(
        organizations=self.org_id, provider__in=self.providers
    )
    # materialize the matches so the truthiness check and indexing work
    matching_integrations = list(filter(integration_match, integrations))
    if not matching_integrations:
        raise serializers.ValidationError("Could not find integration")
    self.integration = matching_integrations[0]

    # now find the matching repo
    repos = Repository.objects.filter(integration_id=self.integration.id)
    matching_repos = list(filter(repo_match, repos))
    if not matching_repos:
        raise serializers.ValidationError("Could not find repo")

    # store the repo we found
    self.repo = matching_repos[0]

    return source_url