Example #1
class PromptsActivitySerializer(serializers.Serializer):
    feature = serializers.CharField(required=True)
    status = serializers.ChoiceField(choices=zip(VALID_STATUSES, VALID_STATUSES), required=True)

    def validate_feature(self, value):
        if value is None:
            raise serializers.ValidationError("Must specify feature name")
        if not prompt_config.has(value):
            raise serializers.ValidationError("Not a valid feature prompt")
        return value
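For context, the choices=zip(VALID_STATUSES, VALID_STATUSES) idiom above simply builds identical (value, label) pairs for the ChoiceField. A minimal sketch with made-up status values:

VALID_STATUSES = ["dismissed", "snoozed", "visible"]  # hypothetical values, for illustration only
list(zip(VALID_STATUSES, VALID_STATUSES))
# [('dismissed', 'dismissed'), ('snoozed', 'snoozed'), ('visible', 'visible')]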
Example #2
    def __init__(self, data, reverse=False, max_limit=MAX_LIMIT, on_results=None):
        self.scores, self.values = (
            map(list, zip(*sorted(data, reverse=reverse))) if data else ([], [])
        )
        self.reverse = reverse
        self.search = functools.partial(
            reverse_bisect_left if reverse else bisect.bisect_left, self.scores
        )
        self.max_limit = max_limit
        self.on_results = on_results
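The zip(*sorted(...)) expression above is the usual "unzip" idiom: sort the (score, value) pairs, then split them back into two parallel lists. A minimal sketch with made-up data:

data = [(3, "c"), (1, "a"), (2, "b")]           # hypothetical (score, value) pairs
scores, values = map(list, zip(*sorted(data)))  # unzip into parallel, sorted lists
# scores == [1, 2, 3], values == ['a', 'b', 'c']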
Example #3
    def assert_dashboard_and_widgets(self, widget_ids):
        assert Dashboard.objects.filter(
            organization=self.organization, id=self.dashboard.id
        ).exists()

        widgets = self.get_widgets(self.dashboard)
        assert len(widgets) == len(list(widget_ids))

        for widget, id in zip(widgets, widget_ids):
            assert widget.id == id
Example #4
    def post(self, request, organization):
        """
        Upload chunks and store them as FileBlobs
        `````````````````````````````````````````
        :pparam file file: The filename should be the sha1 hash of the content.
                           Also note you can add up to MAX_CHUNKS_PER_REQUEST files
                           in this request.

        :auth: required
        """
        # Create a unique instance so our logger can be decoupled from the request
        # and used in threads.
        logger = logging.getLogger("sentry.files")
        logger.info("chunkupload.start")

        files = []
        if request.data:
            files = request.data.getlist("file")
            files += [GzipChunk(chunk) for chunk in request.data.getlist("file_gzip")]

        if len(files) == 0:
            # No files uploaded is ok
            logger.info("chunkupload.end", extra={"status": status.HTTP_200_OK})
            return Response(status=status.HTTP_200_OK)

        logger.info("chunkupload.post.files", extra={"len": len(files)})

        # Validate file size
        checksums = []
        size = 0
        for chunk in files:
            size += chunk.size
            if chunk.size > settings.SENTRY_CHUNK_UPLOAD_BLOB_SIZE:
                logger.info("chunkupload.end", extra={"status": status.HTTP_400_BAD_REQUEST})
                return Response(
                    {"error": "Chunk size too large"}, status=status.HTTP_400_BAD_REQUEST
                )
            checksums.append(chunk.name)

        if size > MAX_REQUEST_SIZE:
            logger.info("chunkupload.end", extra={"status": status.HTTP_400_BAD_REQUEST})
            return Response({"error": "Request too large"}, status=status.HTTP_400_BAD_REQUEST)

        if len(files) > MAX_CHUNKS_PER_REQUEST:
            logger.info("chunkupload.end", extra={"status": status.HTTP_400_BAD_REQUEST})
            return Response({"error": "Too many chunks"}, status=status.HTTP_400_BAD_REQUEST)

        try:
            FileBlob.from_files(zip(files, checksums), organization=organization, logger=logger)
        except OSError as err:
            logger.info("chunkupload.end", extra={"status": status.HTTP_400_BAD_REQUEST})
            return Response({"error": str(err)}, status=status.HTTP_400_BAD_REQUEST)

        logger.info("chunkupload.end", extra={"status": status.HTTP_200_OK})
        return Response(status=status.HTTP_200_OK)
Example #5
    def _get_commit_metadata(self, item_list, user):
        """
        Returns a dictionary of release_id => commit metadata,
        where each commit metadata dict contains commit_count
        and an array of authors.
        e.g.
        {
            1: {
                'latest_commit': <Commit id=1>,
                'authors': [<User id=1>, <User id=2>]
            },
            ...
        }
        """
        author_ids = set()
        for obj in item_list:
            author_ids.update(obj.authors)

        if author_ids:
            authors = list(CommitAuthor.objects.filter(id__in=author_ids))
        else:
            authors = []

        if authors:
            org_ids = set(item.organization_id for item in item_list)
            if len(org_ids) != 1:
                users_by_author = {}
            else:
                users_by_author = get_users_for_authors(
                    organization_id=org_ids.pop(), authors=authors, user=user
                )
        else:
            users_by_author = {}

        commit_ids = set((o.last_commit_id for o in item_list if o.last_commit_id))
        if commit_ids:
            commit_list = list(Commit.objects.filter(id__in=commit_ids).select_related("author"))
            commits = {c.id: d for c, d in zip(commit_list, serialize(commit_list, user))}
        else:
            commits = {}

        result = {}
        for item in item_list:
            item_authors = []
            seen_authors = set()
            for user in (users_by_author.get(a) for a in item.authors):
                if user and user["email"] not in seen_authors:
                    seen_authors.add(user["email"])
                    item_authors.append(user)

            result[item] = {
                "authors": item_authors,
                "last_commit": commits.get(item.last_commit_id),
            }
        return result
Example #6
    def test_post_with_widgets(self):
        data = {
            "title": "Dashboard from Post",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": "Transaction count()",
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                },
                {
                    "displayType": "bar",
                    "interval": "5m",
                    "title": "Error count()",
                    "queries": [
                        {"name": "Errors", "fields": ["count()"], "conditions": "event.type:error"}
                    ],
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201, response.data
        dashboard = Dashboard.objects.get(
            organization=self.organization, title="Dashboard from Post"
        )
        assert dashboard.created_by == self.user

        widgets = self.get_widgets(dashboard.id)
        assert len(widgets) == 2

        for expected_widget, actual_widget in zip(data["widgets"], widgets):
            self.assert_serialized_widget(expected_widget, actual_widget)

            queries = actual_widget.dashboardwidgetquery_set.all()
            for expected_query, actual_query in zip(expected_widget["queries"], queries):
                self.assert_serialized_widget_query(expected_query, actual_query)
Example #7
class GroupValidator(serializers.Serializer):
    inbox = serializers.BooleanField()
    inboxDetails = InboxDetailsValidator()
    status = serializers.ChoiceField(choices=zip(STATUS_CHOICES.keys(), STATUS_CHOICES.keys()))
    statusDetails = StatusDetailsValidator()
    hasSeen = serializers.BooleanField()
    isBookmarked = serializers.BooleanField()
    isPublic = serializers.BooleanField()
    isSubscribed = serializers.BooleanField()
    merge = serializers.BooleanField()
    discard = serializers.BooleanField()
    ignoreDuration = serializers.IntegerField()
    ignoreCount = serializers.IntegerField()
    # in minutes, max of one week
    ignoreWindow = serializers.IntegerField(max_value=7 * 24 * 60)
    ignoreUserCount = serializers.IntegerField()
    # in minutes, max of one week
    ignoreUserWindow = serializers.IntegerField(max_value=7 * 24 * 60)
    assignedTo = ActorField()

    # TODO(dcramer): remove in 9.0
    # for the moment, the CLI sends this for any issue update, so allow nulls
    snoozeDuration = serializers.IntegerField(allow_null=True)

    def validate_assignedTo(self, value):
        if (
            value
            and value.type is User
            and not self.context["project"].member_set.filter(user_id=value.id).exists()
        ):
            raise serializers.ValidationError("Cannot assign to non-team member")

        if (
            value
            and value.type is Team
            and not self.context["project"].teams.filter(id=value.id).exists()
        ):
            raise serializers.ValidationError(
                "Cannot assign to a team without access to the project"
            )

        return value

    def validate_discard(self, value):
        access = self.context.get("access")
        if value and (not access or not access.has_scope("event:admin")):
            raise serializers.ValidationError("You do not have permission to discard events")
        return value

    def validate(self, attrs):
        attrs = super(GroupValidator, self).validate(attrs)
        if len(attrs) > 1 and "discard" in attrs:
            raise serializers.ValidationError("Other attributes cannot be updated when discarding")
        return attrs
Example #8
    def assert_widget_data_sources(self, widget_id, data):
        result_data_sources = sorted(
            WidgetDataSource.objects.filter(widget_id=widget_id, status=ObjectStatus.VISIBLE),
            key=lambda x: x.order,
        )
        data.sort(key=lambda x: x["order"])
        for ds, expected_ds in zip(result_data_sources, data):
            assert ds.name == expected_ds["name"]
            assert ds.type == WidgetDataSourceTypes.get_id_for_type_name(expected_ds["type"])
            assert ds.order == expected_ds["order"]
            assert ds.data == expected_ds["data"]
Example #9
def shingle(n, iterator):
    """\
    Shingle a token stream into N-grams.

    >>> list(shingle(2, ('foo', 'bar', 'baz')))
    [('foo', 'bar'), ('bar', 'baz')]
    """
    return zip(*map(
        lambda i__iterator: advance(i__iterator[0], i__iterator[1]),
        enumerate(itertools.tee(iterator, n)),
    ))
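For readers unfamiliar with the tee/advance trick, here is an equivalent sketch written with itertools.islice, assuming advance(i, it) (a helper not shown in the snippet) merely skips the first i items of an iterator:

import itertools

def shingle_sketch(n, iterator):
    # Make n independent copies of the stream, shift the i-th copy
    # ahead by i items, then zip the shifted copies back together.
    copies = itertools.tee(iterator, n)
    shifted = (itertools.islice(it, i, None) for i, it in enumerate(copies))
    return zip(*shifted)

# list(shingle_sketch(2, ('foo', 'bar', 'baz'))) == [('foo', 'bar'), ('bar', 'baz')]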
Example #10
def build(name, fields):
    names, prepare_fields, merge_fields = zip(*fields)

    cls = namedtuple(name, names)

    def prepare(*args):
        return cls(*[f(*args) for f in prepare_fields])

    def merge(target, other):
        return cls(*[f(target[i], other[i]) for i, f in enumerate(merge_fields)])

    return cls, prepare, merge
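A hypothetical usage showing what zip(*fields) unpacks: each field is a (name, prepare_fn, merge_fn) triple, and the three resulting parallel tuples drive the generated namedtuple. The field definitions below are made up for illustration:

import operator

fields = [
    ("count", lambda value: 1, operator.add),
    ("maximum", lambda value: value, max),
]
Stats, prepare, merge = build("Stats", fields)
a = prepare(3)   # Stats(count=1, maximum=3)
b = prepare(5)   # Stats(count=1, maximum=5)
merge(a, b)      # Stats(count=2, maximum=5)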
Example #11
def to_context(organization, interval, reports):
    report = reduce(merge_reports, reports.values())
    error_series = [
        # Drop the transaction count from each series entry
        (to_datetime(timestamp), Point(*values[:2]))
        for timestamp, values in report.series
    ]
    return {
        # This "error_series" can be removed for new email template
        "error_series": {
            "points": error_series,
            "maximum": max(sum(point) for timestamp, point in error_series),
            "all": sum(sum(point) for timestamp, point in error_series),
            "resolved": sum(point.resolved for timestamp, point in error_series),
        },
        "distribution": {
            "types": list(
                zip(
                    (
                        DistributionType("New", "#DF5120"),
                        DistributionType("Reopened", "#FF7738"),
                        DistributionType("Existing", "#F9C7B9"),
                    ),
                    report.issue_summaries,
                )
            ),
            "total": sum(report.issue_summaries),
        },
        "comparisons": [
            ("last week", change(report.aggregates[-1], report.aggregates[-2])),
            (
                "four week average",
                change(
                    report.aggregates[-1],
                    mean(report.aggregates) if all(v is not None for v in report.aggregates) else None,
                ),
            ),
        ],
        "projects": {"series": build_project_breakdown_series(reports)},
        "calendar": to_calendar(organization, interval, report.calendar_series),
        "key_errors": build_key_errors_ctx(report.key_events, organization),
        "key_transactions": build_key_transactions_ctx(
            report.key_transactions, organization, reports.keys()
        ),
    }
Example #12
    def get_attrs(self, item_list, user):
        user_list = list(
            User.objects.filter(id__in=[item.actor_id for item in item_list]))
        users = {
            u.id: d
            for u, d in zip(user_list, serialize(user_list, user))
        }

        attrs = {}
        for item in item_list:
            attrs[item] = {"user": users.get(item.actor_id, {})}
        return attrs
Example #13
    def assert_dashboard_and_widgets(self, widget_ids, order):
        assert Dashboard.objects.filter(organization=self.organization,
                                        id=self.dashboard.id).exists()

        widgets = self.sort_by_order(
            Widget.objects.filter(dashboard_id=self.dashboard.id,
                                  status=ObjectStatus.VISIBLE))
        assert len(widgets) == len(list(widget_ids))

        for widget, id, order in zip(widgets, widget_ids, order):
            assert widget.id == id
            assert widget.order == order
Example #14
def merge_sequences(target, other, function=operator.add):
    """
    Merge two sequences into a single sequence. The length of the two
    sequences must be equal.
    """
    assert len(target) == len(other), "sequence lengths must match"

    rt_type = type(target)
    if rt_type == range:
        rt_type = list

    return rt_type([function(x, y) for x, y in zip(target, other)])
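For illustration, two small calls with made-up inputs; note that the result keeps the type of the first sequence:

merge_sequences([1, 2, 3], [10, 20, 30])       # [11, 22, 33]
merge_sequences((1, 2), (3, 4), function=max)  # (3, 4)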
Example #15
    def get_attrs(self, item_list, user):
        release_list = list(
            Release.objects.filter(id__in=[i.release_id for i in item_list]))
        releases = {
            r.id: d
            for r, d in zip(release_list, serialize(release_list, user))
        }

        result = {}
        for item in item_list:
            result[item] = {"release": releases.get(item.release_id)}
        return result
Example #16
    def _get_group_snuba_stats(self, item_list, seen_stats):
        start = self._get_start_from_seen_stats(seen_stats)
        unhandled = {}

        cache_keys = []
        for item in item_list:
            cache_keys.append("group-mechanism-handled:%d" % item.id)

        cache_data = cache.get_many(cache_keys)
        for item, cache_key in zip(item_list, cache_keys):
            unhandled[item.id] = cache_data.get(cache_key)

        filter_keys = {}
        for item in item_list:
            if unhandled.get(item.id) is not None:
                continue
            filter_keys.setdefault("project_id", []).append(item.project_id)
            filter_keys.setdefault("group_id", []).append(item.id)

        if filter_keys:
            rv = raw_query(
                dataset=Dataset.Events,
                selected_columns=[
                    "group_id",
                    [
                        "argMax",
                        [["has", ["exception_stacks.mechanism_handled", 0]],
                         "timestamp"],
                        "unhandled",
                    ],
                ],
                groupby=["group_id"],
                filter_keys=filter_keys,
                start=start,
                orderby="group_id",
                referrer="group.unhandled-flag",
            )
            for x in rv["data"]:
                unhandled[x["group_id"]] = x["unhandled"]

                # cache the handled flag for 60 seconds.  This is broadly in line with
                # the time we give for buffer flushes so the user experience is somewhat
                # consistent here.
                cache.set("group-mechanism-handled:%d" % x["group_id"],
                          x["unhandled"], 60)

        return {
            group_id: {
                "unhandled": unhandled
            }
            for group_id, unhandled in unhandled.items()
        }
Example #17
def resolve_function(field, match=None, params=None):
    if not match:
        match = FUNCTION_PATTERN.search(field)
        if not match or match.group("function") not in FUNCTIONS:
            raise InvalidSearchQuery(
                u"{} is not a valid function".format(field))

    function = FUNCTIONS[match.group("function")]
    columns = [
        c.strip() for c in match.group("columns").split(",")
        if len(c.strip()) > 0
    ]

    # Some functions can optionally take no parameters (rpm(), rps()). In that case use the
    # passed in params to create a default argument if necessary.
    used_default = False
    if len(columns) == 0 and len(function["args"]) == 1:
        try:
            default = function["args"][0].has_default(params)
        except InvalidFunctionArgument as e:
            raise InvalidSearchQuery(u"{}: invalid arguments: {}".format(
                field, e))

        if default:
            # Hacky, but we expect column arguments to be strings so easiest to convert it back
            columns = [six.text_type(default)]
            used_default = True

    if len(columns) != len(function["args"]):
        raise InvalidSearchQuery(u"{}: expected {} arguments".format(
            field, len(function["args"])))

    arguments = {}
    for column_value, argument in zip(columns, function["args"]):
        try:
            normalized_value = argument.normalize(column_value)
            arguments[argument.name] = normalized_value
        except InvalidFunctionArgument as e:
            raise InvalidSearchQuery(u"{}: {} argument invalid: {}".format(
                field, argument.name, e))

    snuba_string = function["transform"].format(**arguments)

    return (
        [],
        [[
            snuba_string,
            None,
            get_function_alias(function["name"],
                               columns if not used_default else []),
        ]],
    )
Example #18
    def apply_modifications_to_frame(self, frames, match_frames, idx, rule=None):
        # Grouping is not stored on the frame
        if self.key == "group":
            return
        if self.key == "app":
            for frame, match_frame in self._slice_to_range(zip(frames, match_frames), idx):
                set_in_app(frame, self.flag)
                match_frame["in_app"] = frame["in_app"]
Example #19
    def get_attrs(self, item_list, user):
        crash_files = get_crash_files(item_list)
        serialized_files = {
            file.event_id: serialized
            for file, serialized in zip(crash_files, serialize(crash_files, user=user))
        }
        return {
            event: {"crash_file": serialized_files.get(event.event_id)}
            for event in item_list
        }
Example #20
    def get_frequency_series(self, model, items, start, end=None, rollup=None, environment_id=None):
        self.validate_arguments([model], [environment_id])

        if not self.enable_frequency_sketches:
            raise NotImplementedError("Frequency sketches are disabled.")

        rollup, series = self.get_optimal_rollup_series(start, end, rollup)

        # Here we freeze ordering of the members, since we'll be passing these
        # as positional arguments to the Redis script and later associating the
        # results (which are returned in the same order that the arguments were
        # provided) with the original input values to compose the result.
        for key, members in items.items():
            items[key] = list(members)

        commands = {}

        arguments = ["ESTIMATE"] + list(self.DEFAULT_SKETCH_PARAMETERS)
        for key, members in items.items():
            ks = []
            for timestamp in series:
                ks.extend(
                    self.make_frequency_table_keys(model, rollup, timestamp, key, environment_id)
                )

            commands[key] = [(CountMinScript, ks, arguments + members)]

        results = {}

        cluster, _ = self.get_cluster(environment_id)
        for key, responses in cluster.execute_commands(commands).items():
            members = items[key]

            chunk = results[key] = []
            for timestamp, scores in zip(series, responses[0].value):
                chunk.append((timestamp, dict(zip(members, map(float, scores)))))

        return results
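The nested zip at the end pairs each timestamp in the series with one list of scores returned by Redis, and then pairs each score with its member. A stripped-down sketch of that pairing step with made-up values:

series = [1000, 1010]                  # hypothetical rollup timestamps
members = ["release:1", "release:2"]   # hypothetical members
scores_per_bucket = [[1.0, 2.0], [0.0, 4.0]]

chunk = [
    (timestamp, dict(zip(members, map(float, scores))))
    for timestamp, scores in zip(series, scores_per_bucket)
]
# [(1000, {'release:1': 1.0, 'release:2': 2.0}),
#  (1010, {'release:1': 0.0, 'release:2': 4.0})]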
Example #21
    def get_attrs(self, item_list, user, **kwargs):
        alert_rules = {item.id: item for item in item_list}
        attach_foreignkey(item_list, AlertRule.snuba_query, related=("environment",))

        result = defaultdict(dict)
        triggers = AlertRuleTrigger.objects.filter(alert_rule__in=item_list).order_by("label")
        serialized_triggers = serialize(list(triggers))
        for trigger, serialized in zip(triggers, serialized_triggers):
            alert_rule_triggers = result[alert_rules[trigger.alert_rule_id]].setdefault(
                "triggers", []
            )
            alert_rule_triggers.append(serialized)

        return result
Example #22
    def get(self, request: Request, group) -> Response:
        version = request.GET.get("version", None)
        if version == "2":
            if not feature_flags.has("projects:similarity-view-v2",
                                     group.project):
                return Response(
                    {"error": "Project does not have Similarity V2 feature."})

            features = similarity.features2
        elif version in ("1", None):
            features = similarity.features
        else:
            return Response({"error": "Invalid value for version parameter."})

        limit = request.GET.get("limit", None)
        if limit is not None:
            limit = int(limit) + 1  # the target group will always be included

        group_ids = []
        group_scores = []

        for group_id, scores in features.compare(group, limit=limit):
            if group_id != group.id:
                group_ids.append(group_id)
                group_scores.append(scores)

        serialized_groups = {
            int(g["id"]): g
            for g in serialize(list(
                Group.objects.get_many_from_cache(group_ids)),
                               user=request.user)
        }

        results = []

        # We need to preserve the ordering of the Redis results, as that
        # ordering is directly shown in the UI
        for group_id, scores in zip(group_ids, group_scores):
            group = serialized_groups.get(group_id)
            if group is None:
                # TODO(tkaemming): This should log when we filter out a group that is
                # unable to be retrieved from the database. (This will soon be
                # unexpected behavior, but still possible.)
                continue

            results.append(
                (group, {_fix_label(k): v
                         for k, v in scores.items()}))

        return Response(results)
Example #23
    def compare(self, group, limit=None, thresholds=None):
        if thresholds is None:
            thresholds = {}

        features = list(self.features.keys())

        items = [(self.aliases[label], thresholds.get(label, 0)) for label in features]

        return map(
            lambda key__scores: (int(key__scores[0]), dict(zip(features, key__scores[1]))),
            self.index.compare(
                self.__get_scope(group.project), self.__get_key(group), items, limit=limit
            ),
        )
Example #24
def bulk_get_incident_event_stats(incidents, query_params_list, data_points=50):
    snuba_params_list = [
        SnubaQueryParams(
            aggregations=[
                (
                    query_aggregation_to_snuba[QueryAggregations(incident.aggregation)][0],
                    query_aggregation_to_snuba[QueryAggregations(incident.aggregation)][1],
                    "count",
                )
            ],
            orderby="time",
            groupby=["time"],
            rollup=max(int(incident.duration.total_seconds() / data_points), 1),
            limit=10000,
            **query_param
        )
        for incident, query_param in zip(incidents, query_params_list)
    ]
    results = bulk_raw_query(snuba_params_list, referrer="incidents.get_incident_event_stats")
    return [
        SnubaTSResult(result, snuba_params.start, snuba_params.end, snuba_params.rollup)
        for snuba_params, result in zip(snuba_params_list, results)
    ]
Example #25
def bulk_get_incident_event_stats(incidents, query_params_list):
    snuba_params_list = [
        SnubaQueryParams(
            aggregations=[
                (
                    query_aggregation_to_snuba[
                        aggregate_to_query_aggregation[incident.alert_rule.snuba_query.aggregate]
                    ][0],
                    query_aggregation_to_snuba[
                        aggregate_to_query_aggregation[incident.alert_rule.snuba_query.aggregate]
                    ][1],
                    "count",
                )
            ],
            orderby="time",
            groupby=["time"],
            rollup=incident.alert_rule.snuba_query.time_window,
            limit=10000,
            **query_param
        )
        for incident, query_param in zip(incidents, query_params_list)
    ]
    results = bulk_raw_query(snuba_params_list, referrer="incidents.get_incident_event_stats")
    return [
        SnubaTSResult(result, snuba_params.start, snuba_params.end, snuba_params.rollup)
        for snuba_params, result in zip(snuba_params_list, results)
    ]
Example #26
def bulk_get_incident_stats(incidents, windowed_stats=False):
    """
    Returns bulk stats for a list of incidents. This includes unique user count,
    total event count and event stats.
    Note that even though this function accepts a windowed_stats parameter, it does not
    affect the snapshots. Only the live fetched stats.
    """
    incident_stats = {}
    if windowed_stats:
        # At the moment, snapshots are only ever created with windowed_stats as True
        # so if they send False, we need to do a live calculation below.
        closed = [
            i for i in incidents if i.status == IncidentStatus.CLOSED.value
        ]
        snapshots = IncidentSnapshot.objects.filter(incident__in=closed)
        for snapshot in snapshots:
            event_stats = snapshot.event_stats_snapshot
            incident_stats[snapshot.incident_id] = {
                "event_stats":
                SnubaTSResult(event_stats.snuba_values, event_stats.start,
                              event_stats.end, event_stats.period),
                "total_events":
                snapshot.total_events,
                "unique_users":
                snapshot.unique_users,
            }

    to_fetch = [i for i in incidents if i.id not in incident_stats]
    if to_fetch:
        query_params_list = bulk_build_incident_query_params(
            to_fetch, windowed_stats=False)
        if windowed_stats:
            windowed_query_params_list = bulk_build_incident_query_params(
                to_fetch, windowed_stats=True)
            all_event_stats = bulk_get_incident_event_stats(
                to_fetch, windowed_query_params_list)
        else:
            all_event_stats = bulk_get_incident_event_stats(
                to_fetch, query_params_list)
        all_aggregates = bulk_get_incident_aggregates(query_params_list)
        for incident, event_stats, aggregates in zip(to_fetch, all_event_stats,
                                                     all_aggregates):
            incident_stats[incident.id] = {
                "event_stats": event_stats,
                "total_events": aggregates["count"],
                "unique_users": aggregates["unique_users"],
            }

    return [incident_stats[incident.id] for incident in incidents]
Example #27
    def get(self, request, organization):
        if not features.has("organizations:discover-basic",
                            organization,
                            actor=request.user):
            return self.get_v1_results(request, organization)

        try:
            columns = request.GET.getlist("yAxis", ["count()"])
            params = self.get_filter_params(request, organization)
            rollup = self.get_rollup(request, params)
            # Backwards compatibility for incidents which uses the old
            # column aliases as it straddles both versions of events/discover.
            # We will need these aliases until discover2 flags are enabled for all
            # users.
            column_map = {
                "user_count": "count_unique(user)",
                "event_count": "count()",
                "rpm()": "rpm(%d)" % rollup,
                "rps()": "rps(%d)" % rollup,
            }
            query_columns = [
                column_map.get(column, column) for column in columns
            ]

            result = discover.timeseries_query(
                selected_columns=query_columns,
                query=request.GET.get("query"),
                params=params,
                rollup=rollup,
                reference_event=self.reference_event(request, organization,
                                                     params.get("start"),
                                                     params.get("end")),
                referrer="api.organization-event-stats",
            )
        except InvalidSearchQuery as err:
            raise ParseError(detail=six.text_type(err))
        serializer = SnubaTSResultSerializer(organization, None, request.user)
        if len(columns) > 1:
            # Return with requested yAxis as the key
            data = {
                column: serializer.serialize(
                    result,
                    get_aggregate_alias(
                        AGGREGATE_PATTERN.search(query_column)))
                for column, query_column in zip(columns, query_columns)
            }
        else:
            data = serializer.serialize(result)
        return Response(data, status=200)
Example #28
    def collect(self):
        try:
            os.remove(self.storage.path(VERSION_PATH))
        except OSError:
            pass

        collected = super(Command, self).collect()
        paths = sorted(set(chain(*itemgetter(*collected.keys())(collected))))
        abs_paths = map(self.storage.path, paths)
        version = get_bundle_version(zip(paths, abs_paths))
        echo("-----------------")
        echo(version)
        with open(self.storage.path(VERSION_PATH), "wb") as fp:
            fp.write(version.encode("utf-8"))
        return collected
Example #29
    def get_attrs(self, item_list, user):
        queryset = list(
            EventUser.objects.filter(
                id__in=[i.event_user_id for i in item_list]))

        event_users = {
            e.id: d
            for e, d in zip(queryset, serialize(queryset, user))
        }

        attrs = {}
        for item in item_list:
            attrs[item] = {"event_user": event_users.get(item.event_user_id)}

        return attrs
Example #30
    def get_attrs(self, item_list, user, **kwargs):
        attach_foreignkey(item_list, AlertRuleTrigger.alert_rule)

        triggers = {item.id: item for item in item_list}
        result = defaultdict(dict)

        actions = AlertRuleTriggerAction.objects.filter(
            alert_rule_trigger__in=item_list).order_by("id")
        serialized_actions = serialize(list(actions))
        for trigger, serialized in zip(actions, serialized_actions):
            triggers_actions = result[triggers[
                trigger.alert_rule_trigger_id]].setdefault("actions", [])
            triggers_actions.append(serialized)

        return result