def get_quotas(self, project, key=None, keys=None):
    if key:
        key.project = project

    results = []

    pquota = self.get_project_quota(project)
    if pquota[0] is not None:
        results.append(
            QuotaConfig(
                id="p",
                scope=QuotaScope.PROJECT,
                scope_id=project.id,
                categories=DataCategory.error_categories(),
                limit=pquota[0],
                window=pquota[1],
                reason_code="project_quota",
            )
        )

    oquota = self.get_organization_quota(project.organization)
    if oquota[0] is not None:
        results.append(
            QuotaConfig(
                id="o",
                scope=QuotaScope.ORGANIZATION,
                scope_id=project.organization.id,
                categories=DataCategory.error_categories(),
                limit=oquota[0],
                window=oquota[1],
                reason_code="org_quota",
            )
        )

    if key and not keys:
        keys = [key]
    elif not keys:
        keys = []

    for key in keys:
        kquota = self.get_key_quota(key)
        if kquota[0] is not None:
            results.append(
                QuotaConfig(
                    id="k",
                    scope=QuotaScope.KEY,
                    scope_id=key.id,
                    categories=DataCategory.error_categories(),
                    limit=kquota[0],
                    window=kquota[1],
                    reason_code="key_quota",
                )
            )

    return results
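
# Usage sketch (not from the original source): `backend`, `project`, and
# `project_key` are hypothetical stand-ins for a quota backend instance and
# its ORM objects. get_quotas() returns at most one QuotaConfig per scope,
# each covering every error category.
quotas = backend.get_quotas(project, key=project_key)
for quota in quotas:
    # id "p" = project scope, "o" = organization scope, "k" = key scope
    print(quota.id, quota.scope, quota.limit, quota.window, quota.reason_code)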
def build_project_usage_outcomes(start__stop, project):
    start, stop = start__stop

    # XXX(epurkhiser): Tsdb used to use day buckets, where the end would
    # represent a whole day. Snuba queries are more accurate, thus we must
    # capture the entire last day.
    end = stop + timedelta(days=1)

    query = Query(
        dataset=Dataset.Outcomes.value,
        match=Entity("outcomes"),
        select=[
            Column("outcome"),
            Column("category"),
            Function("sum", [Column("quantity")], "total"),
        ],
        where=[
            Condition(Column("timestamp"), Op.GTE, start),
            Condition(Column("timestamp"), Op.LT, end),
            Condition(Column("project_id"), Op.EQ, project.id),
            Condition(Column("org_id"), Op.EQ, project.organization_id),
            Condition(
                Column("outcome"),
                Op.IN,
                [Outcome.ACCEPTED, Outcome.FILTERED, Outcome.RATE_LIMITED],
            ),
            Condition(
                Column("category"),
                Op.IN,
                [*DataCategory.error_categories(), DataCategory.TRANSACTION],
            ),
        ],
        groupby=[Column("outcome"), Column("category")],
        granularity=Granularity(ONE_DAY),
    )
    data = raw_snql_query(query, referrer="reports.outcomes")["data"]

    return (
        # Accepted errors
        sum(
            row["total"]
            for row in data
            if row["category"] in DataCategory.error_categories()
            and row["outcome"] == Outcome.ACCEPTED
        ),
        # Dropped errors
        sum(
            row["total"]
            for row in data
            if row["category"] in DataCategory.error_categories()
            and row["outcome"] == Outcome.RATE_LIMITED
        ),
        # Accepted transactions
        sum(
            row["total"]
            for row in data
            if row["category"] == DataCategory.TRANSACTION
            and row["outcome"] == Outcome.ACCEPTED
        ),
        # Dropped transactions
        sum(
            row["total"]
            for row in data
            if row["category"] == DataCategory.TRANSACTION
            and row["outcome"] == Outcome.RATE_LIMITED
        ),
    )
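
# Usage sketch (assumed names): the function takes a (start, stop) datetime
# tuple and returns four totals in a fixed order, with all error categories
# collapsed into the error buckets.
accepted_errors, dropped_errors, accepted_transactions, dropped_transactions = (
    build_project_usage_outcomes((start, stop), project)
)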
def map_row(self, row: MutableMapping[str, Any]) -> None:
    if "category" in row:
        category = (
            DataCategory.ERROR
            if row["category"] in DataCategory.error_categories()
            else DataCategory(row["category"])
        )
        row["category"] = category.api_name()
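
# Behavior sketch (hypothetical `mapper` instance): every category in
# DataCategory.error_categories() is reported under ERROR's API name, so
# e.g. SECURITY rows surface as "error" in API responses.
row = {"category": DataCategory.SECURITY, "quantity": 3}
mapper.map_row(row)
assert row["category"] == DataCategory.ERROR.api_name()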
def resolve_filter(self, raw_filter: Sequence[str]) -> List[DataCategory]:
    resolved_categories = set()
    for category in raw_filter:
        # Combine DEFAULT, ERROR, and SECURITY as errors.
        # See relay: py/sentry_relay/consts.py and relay-cabi/include/relay.h
        parsed_category = DataCategory.parse(category)
        if parsed_category is None:
            raise InvalidField(f'Invalid category: "{category}"')
        elif parsed_category == DataCategory.ERROR:
            resolved_categories.update(DataCategory.error_categories())
        else:
            resolved_categories.add(parsed_category)

    if DataCategory.ATTACHMENT in resolved_categories and len(resolved_categories) > 1:
        raise InvalidQuery("if filtering by attachment no other category may be present")

    return list(resolved_categories)
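
# Behavior sketch (hypothetical `resolver` instance): "error" expands to the
# full error category set, while "attachment" may not be combined with any
# other category.
resolver.resolve_filter(["error", "transaction"])
# -> e.g. [DataCategory.DEFAULT, DataCategory.ERROR, DataCategory.SECURITY,
#          DataCategory.TRANSACTION] (set order not guaranteed)
resolver.resolve_filter(["attachment", "error"])  # raises InvalidQuery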
def build_project_series(start__stop, project):
    start, stop = start__stop
    rollup = ONE_DAY

    resolution, series = tsdb.get_optimal_rollup_series(start, stop, rollup)
    assert resolution == rollup, "resolution does not match requested value"

    clean = partial(clean_series, start, stop, rollup)

    def zerofill_clean(data):
        return clean(zerofill(data, start, stop, rollup, fill_default=0))

    # Note: this section can be removed
    issue_ids = project.group_set.filter(
        status=GroupStatus.RESOLVED,
        resolved_at__gte=start,
        resolved_at__lt=stop,
    ).values_list("id", flat=True)

    # TODO: The TSDB calls could be replaced with a SnQL call here
    tsdb_range_resolved = _query_tsdb_groups_chunked(tsdb.get_range, issue_ids, start, stop, rollup)
    resolved_error_series = reduce(
        merge_series,
        map(clean, tsdb_range_resolved.values()),
        clean([(timestamp, 0) for timestamp in series]),
    )
    # end

    # Use outcomes to compute total errors and transactions
    outcomes_query = Query(
        dataset=Dataset.Outcomes.value,
        match=Entity("outcomes"),
        select=[
            Column("time"),
            Column("category"),
            Function("sum", [Column("quantity")], "total"),
        ],
        where=[
            Condition(Column("timestamp"), Op.GTE, start),
            Condition(Column("timestamp"), Op.LT, stop + timedelta(days=1)),
            Condition(Column("project_id"), Op.EQ, project.id),
            Condition(Column("org_id"), Op.EQ, project.organization_id),
            Condition(Column("outcome"), Op.EQ, Outcome.ACCEPTED),
            Condition(
                Column("category"),
                Op.IN,
                [*DataCategory.error_categories(), DataCategory.TRANSACTION],
            ),
        ],
        groupby=[Column("time"), Column("category")],
        granularity=Granularity(rollup),
        orderby=[OrderBy(Column("time"), Direction.ASC)],
    )
    outcome_series = raw_snql_query(outcomes_query, referrer="reports.outcome_series")

    total_error_series = OrderedDict()
    for v in outcome_series["data"]:
        if v["category"] in DataCategory.error_categories():
            timestamp = int(to_timestamp(parse_snuba_datetime(v["time"])))
            total_error_series[timestamp] = total_error_series.get(timestamp, 0) + v["total"]
    total_error_series = zerofill_clean(list(total_error_series.items()))

    transaction_series = [
        (int(to_timestamp(parse_snuba_datetime(v["time"]))), v["total"])
        for v in outcome_series["data"]
        if v["category"] == DataCategory.TRANSACTION
    ]
    transaction_series = zerofill_clean(transaction_series)

    error_series = merge_series(
        resolved_error_series,
        total_error_series,
        lambda resolved, total: (resolved, total - resolved),  # (resolved, unresolved)
    )

    # Format of this series: [(resolved, unresolved, transactions)]
    return merge_series(
        error_series,
        transaction_series,
        lambda errors, transactions: errors + (transactions,),
    )
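
# Shape sketch (assumed): each entry in the returned series pairs a rollup
# timestamp with a (resolved, unresolved, transactions) tuple.
for timestamp, (resolved, unresolved, transactions) in build_project_series(
    (start, stop), project
):
    print(timestamp, resolved, unresolved, transactions)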
SnubaModelQuerySettings = collections.namedtuple(
    # `dataset` - the dataset in Snuba that we want to query
    # `groupby` - the column in Snuba that we want to put in the group by statement
    # `aggregate` - the column in Snuba that we want to run the aggregate function on
    # `conditions` - any additional model-specific conditions we want to pass in the query
    "SnubaModelSettings",
    ["dataset", "groupby", "aggregate", "conditions"],
)

# Combine DEFAULT, ERROR, and SECURITY as errors. We are now recording outcomes by
# category, and these TSDB models and the places they're used assume only errors.
# See relay: py/sentry_relay/consts.py and relay-cabi/include/relay.h
OUTCOMES_CATEGORY_CONDITION = [
    "category",
    "IN",
    DataCategory.error_categories(),
]


class SnubaTSDB(BaseTSDB):
    """
    A time series query interface to Snuba.

    Write methods are not supported, as the raw data from which we generate
    our time series is assumed to already exist in Snuba. Read methods are
    supported only for models based on group/event data and will return
    empty results for unsupported models.
    """

    # Since transactions are currently (and temporarily) written to Snuba's events storage we need to
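
# Usage sketch (assumed wiring, not from the source): a model's query settings
# can carry OUTCOMES_CATEGORY_CONDITION so that outcomes-based TSDB reads only
# count error categories. Field values here are illustrative.
outcomes_errors_settings = SnubaModelQuerySettings(
    dataset=Dataset.Outcomes,
    groupby="project_id",
    aggregate="quantity",
    conditions=[OUTCOMES_CATEGORY_CONDITION],
)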