Example 1
    def safe_delete(self):
        """Deletes a release if possible or raises a `UnsafeReleaseDeletion`
        exception.
        """
        from sentry import release_health
        from sentry.models import Group, ReleaseFile

        # We don't want to lose the first_release metadata on the Group, so even
        # though people might want to kill a release (e.g. to remove its files),
        # deleting a release that is still referenced is prevented.
        if Group.objects.filter(first_release=self).exists():
            raise UnsafeReleaseDeletion(ERR_RELEASE_REFERENCED)

        # We do not allow releases with health data to be deleted because
        # the upserting from snuba data would create the release again.
        # We would need to be able to delete this data from snuba which we
        # can't do yet.
        project_ids = list(self.projects.values_list("id").all())
        if release_health.check_has_health_data(
            [(p[0], self.version) for p in project_ids]
        ):
            raise UnsafeReleaseDeletion(ERR_RELEASE_HEALTH_DATA)

        # TODO(dcramer): this needs to happen in the queue as it could be a long
        # and expensive operation
        file_list = ReleaseFile.objects.filter(release_id=self.id).select_related("file")
        for releasefile in file_list:
            releasefile.file.delete()
            releasefile.delete()
        self.delete()
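
A hedged usage sketch (not part of the original example) of how `safe_delete` might be called from application code; the import paths for `Release` and `UnsafeReleaseDeletion` are assumptions and may differ between Sentry versions.

# Hypothetical caller of `safe_delete`; import paths are assumptions.
from sentry.models import Release
from sentry.models.release import UnsafeReleaseDeletion


def delete_release_if_safe(organization_id, version):
    """Attempt to delete a release, reporting why deletion was refused."""
    release = Release.objects.get(organization_id=organization_id, version=version)
    try:
        release.safe_delete()
    except UnsafeReleaseDeletion as exc:
        # Raised when a Group references the release as first_release or
        # when health data exists for it (see the checks above).
        return {"deleted": False, "detail": str(exc)}
    return {"deleted": True}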
Example 2
    def get_session_stats(self, project_ids):
        segments, interval = STATS_PERIOD_CHOICES[self.stats_period]

        now = timezone.now()
        current_interval_start = now - (segments * interval)
        previous_interval_start = now - (2 * segments * interval)

        project_health_data_dict = release_health.get_current_and_previous_crash_free_rates(
            project_ids=project_ids,
            current_start=current_interval_start,
            current_end=now,
            previous_start=previous_interval_start,
            previous_end=current_interval_start,
            rollup=int(interval.total_seconds()),
        )

        # List of ids of projects that have both `currentCrashFreeRate` and
        # `previousCrashFreeRate` set to None; for those we cannot tell from the
        # rates alone whether they have health data, so we collect them here to
        # check later.
        check_has_health_data_ids = []

        for project_id in project_ids:
            current_crash_free_rate = project_health_data_dict[project_id]["currentCrashFreeRate"]
            previous_crash_free_rate = project_health_data_dict[project_id]["previousCrashFreeRate"]

            if [current_crash_free_rate, previous_crash_free_rate] != [None, None]:
                project_health_data_dict[project_id]["hasHealthData"] = True
            else:
                project_health_data_dict[project_id]["hasHealthData"] = False
                check_has_health_data_ids.append(project_id)

        # For project ids where we are not sure whether they have health data in
        # the last 90 days, call `check_has_health_data` with those ids and then
        # update our `project_health_data_dict` accordingly.
        if check_has_health_data_ids:
            projects_with_health_data = release_health.check_has_health_data(
                check_has_health_data_ids)
            for project_id in projects_with_health_data:
                project_health_data_dict[project_id]["hasHealthData"] = True

        return project_health_data_dict
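
A hedged sketch of consuming `get_session_stats`; the serializer class, its import path, and the `stats_period` value are assumptions made for illustration. Each per-project dict carries the `currentCrashFreeRate`, `previousCrashFreeRate` and `hasHealthData` keys set above.

# Hypothetical call site; the serializer class and import path are assumptions.
from sentry.api.serializers.models.project import ProjectSummarySerializer

serializer = ProjectSummarySerializer(stats_period="24h")
session_stats = serializer.get_session_stats(project_ids=[1, 2, 3])

for project_id, data in session_stats.items():
    if data["hasHealthData"]:
        print(project_id, data["currentCrashFreeRate"], data["previousCrashFreeRate"])
    else:
        # No sessions were reported for this project in the whole window.
        print(project_id, "no health data")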
Example 3
def get_release_health_data_overview(
    project_releases,
    environments=None,
    summary_stats_period=None,
    health_stats_period=None,
    stat=None,
):
    """Checks quickly for which of the given project releases we have
    health data available.  The argument is a tuple of `(project_id, release_name)`
    tuples.  The return value is a set of all the project releases that have health
    data.
    """
    if stat is None:
        stat = "sessions"
    assert stat in ("sessions", "users")

    _, summary_start, _ = get_rollup_starts_and_buckets(summary_stats_period or "24h")
    conditions, filter_keys = _get_conditions_and_filter_keys(project_releases, environments)

    stats_rollup, stats_start, stats_buckets = get_rollup_starts_and_buckets(health_stats_period)

    missing_releases = set(project_releases)
    rv = {}
    for x in raw_query(
        dataset=Dataset.Sessions,
        selected_columns=[
            "release",
            "project_id",
            "duration_quantiles",
            "sessions",
            "sessions_errored",
            "sessions_crashed",
            "sessions_abnormal",
            "users",
            "users_crashed",
        ],
        groupby=["release", "project_id"],
        start=summary_start,
        conditions=conditions,
        filter_keys=filter_keys,
        referrer="sessions.release-overview",
    )["data"]:
        rp = {
            "crash_free_users": (
                100 - x["users_crashed"] / float(x["users"]) * 100 if x["users"] else None
            ),
            "crash_free_sessions": (
                100 - x["sessions_crashed"] / float(x["sessions"]) * 100 if x["sessions"] else None
            ),
            "total_users": x["users"],
            "total_sessions": x["sessions"],
            "sessions_crashed": x["sessions_crashed"],
            "sessions_errored": max(
                0, x["sessions_errored"] - x["sessions_crashed"] - x["sessions_abnormal"]
            ),
            "has_health_data": True,
        }
        rp.update(extract_duration_quantiles(x))
        if health_stats_period:
            rp["stats"] = {
                health_stats_period: _make_stats(stats_start, stats_rollup, stats_buckets)
            }
        rv[x["project_id"], x["release"]] = rp
        missing_releases.discard((x["project_id"], x["release"]))

    # Add releases without data points
    if missing_releases:
        # If we're already looking at a 90 day horizon we don't need to
        # fire another query; we can already assume there is no data.
        if summary_stats_period != "90d":
            has_health_data = release_health.check_has_health_data(missing_releases)
        else:
            has_health_data = ()
        for key in missing_releases:
            rv[key] = {
                "duration_p50": None,
                "duration_p90": None,
                "crash_free_users": None,
                "crash_free_sessions": None,
                "total_users": 0,
                "total_sessions": 0,
                "sessions_crashed": 0,
                "sessions_errored": 0,
                "has_health_data": key in has_health_data,
            }
            if health_stats_period:
                rv[key]["stats"] = {
                    health_stats_period: _make_stats(stats_start, stats_rollup, stats_buckets)
                }

    release_adoption = release_health.get_release_adoption(project_releases, environments)
    for key in rv:
        adoption_info = release_adoption.get(key) or {}
        rv[key]["adoption"] = adoption_info.get("adoption")
        rv[key]["sessions_adoption"] = adoption_info.get("sessions_adoption")
        rv[key]["total_users_24h"] = adoption_info.get("users_24h")
        rv[key]["total_project_users_24h"] = adoption_info.get("project_users_24h")
        rv[key]["total_sessions_24h"] = adoption_info.get("sessions_24h")
        rv[key]["total_project_sessions_24h"] = adoption_info.get("project_sessions_24h")

    if health_stats_period:
        for x in raw_query(
            dataset=Dataset.Sessions,
            selected_columns=["release", "project_id", "bucketed_started", stat],
            groupby=["release", "project_id", "bucketed_started"],
            rollup=stats_rollup,
            start=stats_start,
            conditions=conditions,
            filter_keys=filter_keys,
            referrer="sessions.release-stats",
        )["data"]:
            time_bucket = int(
                (parse_snuba_datetime(x["bucketed_started"]) - stats_start).total_seconds()
                / stats_rollup
            )
            key = (x["project_id"], x["release"])
            # Sometimes this might return a release we haven't seen yet or it might
            # return a time bucket that did not exist yet at the time of the initial
            # query.  In that case, just skip it.
            if key in rv and time_bucket < len(rv[key]["stats"][health_stats_period]):
                rv[key]["stats"][health_stats_period][time_bucket][1] = x[stat]

    return rv
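
A hedged sketch of calling `get_release_health_data_overview` and reading the per-release entries it builds above; the project id and release names are placeholders.

# Hypothetical call; the project id and release names are placeholders.
overview = get_release_health_data_overview(
    project_releases=[(42, "frontend@1.0.0"), (42, "frontend@1.1.0")],
    summary_stats_period="24h",
    health_stats_period="24h",
    stat="sessions",
)

for (project_id, release), entry in overview.items():
    print(
        release,
        entry["crash_free_sessions"],
        entry["total_sessions"],
        entry["adoption"],
        entry["has_health_data"],
    )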