Example #1
def create_counter_function(db, created_models, **kwargs):
    if not is_postgres(db):
        return

    if Counter not in created_models:
        return

    cursor = connections[db].cursor()
    cursor.execute(
        '''
        create or replace function sentry_increment_project_counter(
            project bigint, delta int) returns int as $$
        declare
          new_val int;
        begin
          loop
            update sentry_projectcounter set value = value + delta
             where project_id = project
               returning value into new_val;
            if found then
              return new_val;
            end if;
            begin
              insert into sentry_projectcounter(project_id, value)
                   values (project, delta)
                returning value into new_val;
              return new_val;
            exception when unique_violation then
            end;
          end loop;
        end
        $$ language plpgsql;
    '''
    )
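The loop above is the classic pre-9.5 upsert pattern: try the UPDATE first, fall back to INSERT, and swallow the unique_violation raised when a concurrent insert wins the race. For comparison, on PostgreSQL 9.5+ the same counter bump can be expressed as a single statement; the sketch below is an illustrative assumption, not part of the Sentry code in these examples.

# Hypothetical PostgreSQL 9.5+ alternative to the retry loop, for illustration only.
# It relies on the same unique constraint on project_id that the loop depends on.
from django.db import connections

def increment_counter(db, project_id, delta=1):
    cursor = connections[db].cursor()
    cursor.execute(
        '''
        insert into sentry_projectcounter (project_id, value)
             values (%s, %s)
        on conflict (project_id)
        do update set value = sentry_projectcounter.value + excluded.value
        returning value
        ''',
        [project_id, delta],
    )
    return cursor.fetchone()[0]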
Example #2
def create_default_project(id, name, slug, verbosity=2, **kwargs):
    if Project.objects.filter(id=id).exists():
        return

    try:
        user = User.objects.filter(is_superuser=True)[0]
    except IndexError:
        user, _ = User.objects.get_or_create(username="******", defaults={"email": "sentry@localhost"})

    org, _ = Organization.objects.get_or_create(slug="sentry", defaults={"owner": user, "name": "Sentry"})

    team, _ = Team.objects.get_or_create(organization=org, slug="sentry", defaults={"name": "Sentry"})

    project = Project.objects.create(
        id=id, public=False, name=name, slug=slug, team=team, organization=team.organization, **kwargs
    )

    # HACK: manually update the ID after insert due to Postgres
    # sequence issues. Seriously, f**k everything about this.
    if db.is_postgres(project._state.db):
        connection = connections[project._state.db]
        cursor = connection.cursor()
        cursor.execute(PROJECT_SEQUENCE_FIX)

    project.update_option("sentry:origins", ["*"])

    if verbosity > 0:
        print("Created internal Sentry project (slug=%s, id=%s)" % (project.slug, project.id))

    return project
Example #3
    def get_group_tag_value_count(self, project_id, group_id, environment_id, key):
        if db.is_postgres():
            # This doesn't guarantee the percentage is accurate, but it does ensure
            # that the query has a maximum cost
            using = router.db_for_read(models.GroupTagValue)
            cursor = connections[using].cursor()
            cursor.execute(
                """
                SELECT SUM(t)
                FROM (
                    SELECT times_seen as t
                    FROM sentry_messagefiltervalue
                    WHERE group_id = %s
                    AND key = %s
                    ORDER BY last_seen DESC
                    LIMIT 10000
                ) as a
            """, [group_id, key]
            )
            return cursor.fetchone()[0] or 0

        cutoff = timezone.now() - timedelta(days=7)
        return models.GroupTagValue.objects.filter(
            group_id=group_id,
            key=key,
            last_seen__gte=cutoff,
        ).aggregate(t=Sum('times_seen'))['t']
Example #4
    def get_top_values(cls, group_id, key, limit=3):
        if db.is_postgres():
            # This doesn't guarantee the percentage is accurate, but it does ensure
            # that the query has a maximum cost
            return list(
                cls.objects.raw(
                    """
                SELECT *
                FROM (
                    SELECT *
                    FROM sentry_messagefiltervalue
                    WHERE group_id = %%s
                    AND key = %%s
                    ORDER BY last_seen DESC
                    LIMIT 10000
                ) as a
                ORDER BY times_seen DESC
                LIMIT %d
            """
                    % limit,
                    [group_id, key],
                )
            )

        cutoff = timezone.now() - timedelta(days=7)
        return list(cls.objects.filter(group=group_id, key=key, last_seen__gte=cutoff).order_by("-times_seen")[:limit])
Example #5
def bulk_delete_objects(model, limit=10000, transaction_id=None, logger=None, **filters):
    connection = connections[router.db_for_write(model)]
    quote_name = connection.ops.quote_name

    query = []
    params = []
    for column, value in filters.items():
        query.append('%s = %%s' % (quote_name(column), ))
        params.append(value)

    if db.is_postgres():
        query = """
            delete from %(table)s
            where id = any(array(
                select id
                from %(table)s
                where (%(query)s)
                limit %(limit)d
            ))
        """ % dict(
            query=' AND '.join(query),
            table=model._meta.db_table,
            limit=limit,
        )
    elif db.is_mysql():
        query = """
            delete from %(table)s
            where (%(query)s)
            limit %(limit)d
        """ % dict(
            query=' AND '.join(query),
            table=model._meta.db_table,
            limit=limit,
        )
    else:
        if logger is not None:
            logger.warning('Using slow deletion strategy due to unknown database')
        has_more = False
        for obj in model.objects.filter(**filters)[:limit]:
            obj.delete()
            has_more = True
        return has_more

    cursor = connection.cursor()
    cursor.execute(query, params)

    has_more = cursor.rowcount > 0

    if has_more and logger is not None and _leaf_re.search(model.__name__) is None:
        logger.info(
            'object.delete.bulk_executed',
            extra=dict(
                filters.items() + [
                    ('model', model.__name__),
                    ('transaction_id', transaction_id),
                ]
            )
        )

    return has_more
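Callers of this helper typically loop on the boolean it returns, deleting in bounded chunks until a pass removes nothing; a minimal usage sketch (the function and filter names here are illustrative assumptions, not taken from the examples):

# Hypothetical caller: delete matching rows in bounded chunks until none remain.
def delete_all_for_group(model, group_id, chunk=10000):
    while bulk_delete_objects(model, limit=chunk, group_id=group_id):
        pass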
Example #6
    def forwards(self, orm):
        # Adding index on 'Group', fields ['project', 'first_release']
        if is_postgres():
            db.commit_transaction()
            db.execute("CREATE INDEX CONCURRENTLY sentry_groupedmessage_project_id_31335ae34c8ef983 ON sentry_groupedmessage (project_id, first_release_id)")
            db.start_transaction()
        else:
            db.create_index('sentry_groupedmessage', ['project_id', 'first_release_id'])
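The commit/start-transaction calls around the raw statement exist because CREATE INDEX CONCURRENTLY cannot run inside a transaction block. For reference, Django 3.0+ exposes the same pattern as a built-in migration operation; the sketch below is a hedged modern equivalent, not the South migration shown above (the dependencies list and index name are placeholders):

# Sketch only: Django 3.0+ analogue of the migration above.
from django.contrib.postgres.operations import AddIndexConcurrently
from django.db import migrations, models


class Migration(migrations.Migration):
    # CONCURRENTLY is rejected inside a transaction, so the migration must be non-atomic.
    atomic = False

    dependencies = []  # fill in the real dependency chain

    operations = [
        AddIndexConcurrently(
            "group",
            models.Index(fields=["project", "first_release"], name="group_first_release_idx"),
        ),
    ]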
Example #7
    def forwards(self, orm):
        # Removing index on 'GroupHash', fields ['hash']
        if is_postgres():
            try:
                with transaction.atomic():
                    db.delete_index(u'sentry_grouphash', ['hash'])
            except Exception:
                pass
Example #8
    def iterator(self, chunk_size=100):
        if db.is_postgres():
            g = self.iterator_postgres(chunk_size)
        else:
            g = self.iterator_generic(chunk_size)

        for chunk in g:
            yield chunk
Example #9
def increment_project_counter(project, delta=1):
    """This method primarily exists so that south code can use it."""
    if delta <= 0:
        raise ValueError('There is only one way, and that\'s up.')

    cur = connection.cursor()
    try:
        if is_postgres():
            cur.execute(
                '''
                select sentry_increment_project_counter(%s, %s)
            ''', [project.id, delta]
            )
            return cur.fetchone()[0]
        elif is_sqlite():
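            # Emulated upsert for SQLite: seed the row with INSERT OR IGNORE, then
            # retry the UPDATE until changes() confirms a row was actually modified.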
            value = cur.execute(
                '''
                insert or ignore into sentry_projectcounter
                  (project_id, value) values (%s, 0);
            ''', [project.id]
            )
            value = cur.execute(
                '''
                select value from sentry_projectcounter
                 where project_id = %s
            ''', [project.id]
            ).fetchone()[0]
            while 1:
                cur.execute(
                    '''
                    update sentry_projectcounter
                       set value = value + %s
                     where project_id = %s;
                ''', [delta, project.id]
                )
                changes = cur.execute(
                    '''
                    select changes();
                '''
                ).fetchone()[0]
                if changes != 0:
                    return value + delta
        elif is_mysql():
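            # MySQL upsert: INSERT ... ON DUPLICATE KEY UPDATE, stashing the new
            # counter value in the @new_val session variable so it can be read back.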
            cur.execute(
                '''
                insert into sentry_projectcounter
                            (project_id, value)
                     values (%s, @new_val := %s)
           on duplicate key
                     update value = @new_val := value + %s
            ''', [project.id, delta, delta]
            )
            cur.execute('select @new_val')
            return cur.fetchone()[0]
        else:
            raise AssertionError("Not implemented database engine path")
    finally:
        cur.close()
Example #10
    def get(self, request, project, key):
        """
        List a Tag's Values
        ```````````````````

        Return a list of values associated with this key.  The `query`
        parameter can be used to perform a "starts with" match on
        values.

        :pparam string organization_slug: the slug of the organization.
        :pparam string project_slug: the slug of the project.
        :pparam string key: the tag key to look up.
        :auth: required
        """
        if key in ('release', 'user', 'filename', 'function'):
            lookup_key = 'sentry:{0}'.format(key)
        else:
            lookup_key = key

        try:
            tagkey = TagKey.objects.get(
                project=project,
                key=lookup_key,
                status=TagKeyStatus.VISIBLE,
            )
        except TagKey.DoesNotExist:
            raise ResourceDoesNotExist

        base_queryset = TagValue.objects.filter(
            project=project,
            key=tagkey.key,
        )

        query = request.GET.get('query')
        if query:
            if is_postgres():
                # not quite optimal, but the best we can do with the ORM
                queryset = TagValue.objects.filter(
                    id__in=base_queryset.order_by('-times_seen')[:10000]
                )
            else:
                # MySQL can't handle an `IN` with a `LIMIT` clause
                queryset = base_queryset
            queryset = queryset.filter(value__istartswith=query)

        else:
            queryset = TagValue.objects.filter(
                project=project,
                key=tagkey.key,
            )

        return self.paginate(
            request=request,
            queryset=queryset,
            order_by='-times_seen',
            on_results=lambda x: serialize(x, request.user),
        )
Example #11
def bulk_delete_objects(model, group_id=None, project_id=None, limit=10000,
                        logger=None):
    assert group_id or project_id, 'Must pass either project_id or group_id'

    if group_id:
        column = 'group_id'
        value = group_id

    elif project_id:
        column = 'project_id'
        value = project_id

    connection = connections['default']
    quote_name = connection.ops.quote_name

    if logger is not None:
        logger.info('Removing %r objects where %s=%r', model, column, value)

    if db.is_postgres():
        query = """
            delete from %(table)s
            where id = any(array(
                select id
                from %(table)s
                where %(column)s = %%s
                limit %(limit)d
            ))
        """ % dict(
            table=model._meta.db_table,
            column=quote_name(column),
            limit=limit,
        )
        params = [value]
    elif db.is_mysql():
        query = """
            delete from %(table)s
            where %(column)s = %%s
            limit %(limit)d
        """ % dict(
            table=model._meta.db_table,
            column=quote_name(column),
            limit=limit,
        )
        params = [value]
    else:
        logger.warning('Using slow deletion strategy due to unknown database')
        has_more = False
        for obj in model.objects.filter(project=project_id)[:limit]:
            obj.delete()
            has_more = True
        return has_more

    cursor = connection.cursor()
    cursor.execute(query, params)
    return cursor.rowcount > 0
Example #12
def create_default_project(id, name, slug, verbosity=2, **kwargs):
    if Project.objects.filter(id=id).exists():
        return

    try:
        user = User.objects.filter(is_superuser=True)[0]
    except IndexError:
        user = None

    org, _ = Organization.objects.get_or_create(
        slug='sentry',
        defaults={
            'name': 'Sentry',
        }
    )

    if user:
        OrganizationMember.objects.get_or_create(
            user=user,
            organization=org,
            role='owner',
        )

    team, _ = Team.objects.get_or_create(
        organization=org,
        slug='sentry',
        defaults={
            'name': 'Sentry',
        }
    )

    project = Project.objects.create(
        id=id,
        public=False,
        name=name,
        slug=slug,
        team=team,
        organization=team.organization,
        **kwargs
    )

    # HACK: manually update the ID after insert due to Postgres
    # sequence issues. Seriously, f**k everything about this.
    if db.is_postgres(project._state.db):
        connection = connections[project._state.db]
        cursor = connection.cursor()
        cursor.execute(PROJECT_SEQUENCE_FIX)

    project.update_option('sentry:origins', ['*'])

    if verbosity > 0:
        echo('Created internal Sentry project (slug=%s, id=%s)' % (project.slug, project.id))

    return project
Example #13
def create_citext_extension(db, **kwargs):
    from sentry.utils.db import is_postgres

    # We always need the citext extension installed for Postgres,
    # and for tests, it's not always guaranteed that we will have
    # run full migrations which installed it.
    if is_postgres(db):
        cursor = connections[db].cursor()
        try:
            cursor.execute('CREATE EXTENSION IF NOT EXISTS citext')
        except Exception:
            pass
Example #14
def bulk_delete_objects(model, limit=10000, logger=None, using='default',
                        **filters):
    connection = connections[using]
    quote_name = connection.ops.quote_name

    query = []
    params = []
    for column, value in filters.items():
        query.append('%s = %%s' % (quote_name(column),))
        params.append(value)

    if logger is not None:
        logger.info('Removing %r objects where %s=%r', model, column, value)

    if db.is_postgres():
        query = """
            delete from %(table)s
            where id = any(array(
                select id
                from %(table)s
                where (%(query)s)
                limit %(limit)d
            ))
        """ % dict(
            query=' AND '.join(query),
            table=model._meta.db_table,
            limit=limit,
        )
    elif db.is_mysql():
        query = """
            delete from %(table)s
            where (%(query)s)
            limit %(limit)d
        """ % dict(
            query=' AND '.join(query),
            table=model._meta.db_table,
            limit=limit,
        )
    else:
        if logger is not None:
            logger.warning('Using slow deletion strategy due to unknown database')
        has_more = False
        for obj in model.objects.filter(**filters)[:limit]:
            obj.delete()
            has_more = True
        return has_more

    cursor = connection.cursor()
    cursor.execute(query, params)
    return cursor.rowcount > 0
Example #15
    def as_sql(self, compiler, connection, function=None, template=None):
        db = getattr(connection, 'alias', 'default')
        has_values = self.last_seen is not None and self.times_seen is not None
        if is_postgres(db):
            if has_values:
                sql = 'log(times_seen + %d) * 600 + %d' % (self.times_seen,
                                                           to_timestamp(self.last_seen))
            else:
                sql = 'log(times_seen) * 600 + last_seen::abstime::int'
        else:
            # XXX: if we can't do it atomically let's do it the best we can
            sql = int(self)

        return (sql, [])
Example #16
    def get_top_group_tag_values(self, project_id, group_id,
                                 environment_id, key, limit=TOP_VALUES_DEFAULT_LIMIT):
        if db.is_postgres():
            environment_id = AGGREGATE_ENVIRONMENT_ID if environment_id is None else environment_id

            # This doesn't guarantee the percentage is accurate, but it does ensure
            # that the query has a maximum cost
            return list(
                map(
                    transformers[models.GroupTagValue],
                    models.GroupTagValue.objects.raw(
                        """
                        SELECT *
                        FROM (
                            SELECT tagstore_grouptagvalue.*
                            FROM tagstore_grouptagvalue
                            INNER JOIN tagstore_tagkey
                            ON (tagstore_grouptagvalue.key_id = tagstore_tagkey.id)
                            WHERE tagstore_grouptagvalue.group_id = %%s
                            AND tagstore_tagkey.project_id = %%s
                            AND tagstore_grouptagvalue.project_id = %%s
                            AND tagstore_tagkey.environment_id = %%s
                            AND tagstore_tagkey.key = %%s
                            ORDER BY last_seen DESC
                            LIMIT 10000
                        ) as a
                        ORDER BY times_seen DESC
                        LIMIT %d
                        """ % limit,
                        [group_id, project_id, project_id, environment_id, key]
                    ),
                )
            )

        cutoff = timezone.now() - timedelta(days=7)
        qs = models.GroupTagValue.objects.select_related('_key', '_value').filter(
            project_id=project_id,
            group_id=group_id,
            _key__project_id=project_id,
            _key__key=key,
            _value__project_id=project_id,
            last_seen__gte=cutoff,
        )
        qs = self._add_environment_filter(qs, environment_id)
        return list(
            map(
                transformers[models.GroupTagValue],
                qs.order_by('-times_seen')[:limit],
            )
        )
Example #17
def bulk_fetch_project_latest_releases(projects):
    """
    Fetches the latest release for each of the passed projects
    :param projects:
    :return: List of Releases, each with an additional `actual_project_id`
    attribute representing the project that they're the latest release for. If
    no release found, no entry will be returned for the given project.
    """
    if is_postgres():
        # XXX: This query could be very inefficient for projects with a large
        # number of releases. To work around this, we only check 20 releases
        # ordered by highest release id, which is generally correlated with
        # most recent releases for a project. This could potentially result in
        # not having the correct most recent release, but in practice will
        # likely work fine.
        release_project_join_sql = """
            JOIN (
                SELECT *
                FROM sentry_release_project lrp
                WHERE lrp.project_id = p.id
                ORDER BY lrp.release_id DESC
                LIMIT 20
            ) lrp ON lrp.release_id = lrr.id
        """
    else:
        release_project_join_sql = 'JOIN sentry_release_project lrp ON lrp.release_id = lrr.id'
    return list(Release.objects.raw(
        u"""
        SELECT lr.project_id as actual_project_id, r.*
        FROM (
            SELECT (
                SELECT lrr.id
                FROM sentry_release lrr
                {}
                WHERE lrp.project_id = p.id
                ORDER BY COALESCE(lrr.date_released, lrr.date_added) DESC
                LIMIT 1
            ) as release_id,
            p.id as project_id
            FROM sentry_project p
            WHERE p.id IN ({})
        ) as lr
        JOIN sentry_release r
        ON r.id = lr.release_id
        """.format(
            release_project_join_sql,
            ', '.join(six.text_type(i.id) for i in projects),
        ),
    ))
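Each Release returned by the raw query above carries the extra actual_project_id attribute described in the docstring, so a per-project lookup is the natural way to consume the result; a small usage sketch (assuming projects is the same iterable passed to the function):

# Illustrative usage: map each project id to its latest Release; projects with
# no release simply have no entry in the dict.
latest_release_by_project = {
    release.actual_project_id: release
    for release in bulk_fetch_project_latest_releases(projects)
}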
Example #18
def bulk_delete_objects(model, limit=10000,
                        logger=None, **filters):
    assert len(filters) == 1, 'Must pass a single column=value filter.'

    column, value = filters.items()[0]

    connection = connections['default']
    quote_name = connection.ops.quote_name

    if logger is not None:
        logger.info('Removing %r objects where %s=%r', model, column, value)

    if db.is_postgres():
        query = """
            delete from %(table)s
            where id = any(array(
                select id
                from %(table)s
                where %(column)s = %%s
                limit %(limit)d
            ))
        """ % dict(
            table=model._meta.db_table,
            column=quote_name(column),
            limit=limit,
        )
        params = [value]
    elif db.is_mysql():
        query = """
            delete from %(table)s
            where %(column)s = %%s
            limit %(limit)d
        """ % dict(
            table=model._meta.db_table,
            column=quote_name(column),
            limit=limit,
        )
        params = [value]
    else:
        logger.warning('Using slow deletion strategy due to unknown database')
        has_more = False
        for obj in model.objects.filter(**{column: value})[:limit]:
            obj.delete()
            has_more = True
        return has_more

    cursor = connection.cursor()
    cursor.execute(query, params)
    return cursor.rowcount > 0
Example #19
            def get_child_relations(self, instance):
                # in bulk
                model_list = (models.GroupTagValue, models.GroupTagKey, models.TagValue)

                # required to deal with custom SQL queries and the ORM
                # in `bulk_delete_objects`
                key_id_field_name = 'key_id' if (db.is_postgres()) else '_key_id'

                relations = [
                    ModelRelation(m, query={
                        'project_id': instance.project_id,
                        key_id_field_name: instance.id,
                    }, partition_key={'project_id': instance.project_id}) for m in model_list
                ]
                return relations
Example #20
def increment_project_counter(project, delta=1):
    """This method primarily exists so that south code can use it."""
    if delta <= 0:
        raise ValueError('There is only one way, and that\'s up.')

    cur = connection.cursor()
    try:
        if is_postgres():
            cur.execute(
                '''
                select sentry_increment_project_counter(%s, %s)
            ''', [project.id, delta]
            )
            return cur.fetchone()[0]
        else:
            raise AssertionError("Not implemented database engine path")
    finally:
        cur.close()
Example #21
def scoreclause_sql(sc, connection):
    db = getattr(connection, 'alias', 'default')
    has_values = sc.last_seen is not None and sc.times_seen is not None
    if is_postgres(db):
        if has_values:
            sql = 'log(times_seen + %d) * 600 + %d' % (sc.times_seen, to_timestamp(sc.last_seen))
        else:
            sql = 'log(times_seen) * 600 + last_seen::abstime::int'
    elif is_mysql(db):
        if has_values:
            sql = 'log(times_seen + %d) * 600 + %d' % (sc.times_seen, to_timestamp(sc.last_seen))
        else:
            sql = 'log(times_seen) * 600 + unix_timestamp(last_seen)'
    else:
        # XXX: if we can't do it atomically let's do it the best we can
        sql = int(sc)

    return (sql, [])
Example #22
    def get_group_tag_value_count(self, project_id, group_id, environment_id, key):
        if db.is_postgres():
            environment_id = AGGREGATE_ENVIRONMENT_ID if environment_id is None else environment_id

            # This doesn't guarantee the percentage is accurate, but it does ensure
            # that the query has a maximum cost
            using = router.db_for_read(models.GroupTagValue)
            cursor = connections[using].cursor()
            cursor.execute(
                """
                SELECT SUM(t)
                FROM (
                    SELECT tagstore_grouptagvalue.times_seen as t
                    FROM tagstore_grouptagvalue
                    INNER JOIN tagstore_tagkey
                    ON (tagstore_grouptagvalue.key_id = tagstore_tagkey.id)
                    WHERE tagstore_grouptagvalue.group_id = %s
                    AND tagstore_tagkey.project_id = %s
                    AND tagstore_grouptagvalue.project_id = %s
                    AND tagstore_tagkey.environment_id = %s
                    AND tagstore_tagkey.key = %s
                    ORDER BY last_seen DESC
                    LIMIT 10000
                ) as a
                """,
                [group_id, project_id, project_id, environment_id, key]
            )
            return cursor.fetchone()[0] or 0

        cutoff = timezone.now() - timedelta(days=7)
        qs = models.GroupTagValue.objects.filter(
            project_id=project_id,
            group_id=group_id,
            _key__project_id=project_id,
            _key__key=key,
            last_seen__gte=cutoff,
        )
        qs = self._add_environment_filter(qs, environment_id)
        return qs.aggregate(t=Sum('times_seen'))['t']
Example #23
    def get_value_count(cls, group_id, key):
        if db.is_postgres():
            # This doesn't guarantee the percentage is accurate, but it does ensure
            # that the query has a maximum cost
            cursor = connections["default"].cursor()
            cursor.execute(
                """
                SELECT SUM(t)
                FROM (
                    SELECT times_seen as t
                    FROM sentry_messagefiltervalue
                    WHERE group_id = %s
                    AND key = %s
                    ORDER BY last_seen DESC
                    LIMIT 10000
                ) as a
            """,
                [group_id, key],
            )
            return cursor.fetchone()[0] or 0

        cutoff = timezone.now() - timedelta(days=7)
        return cls.objects.filter(group=group_id, key=key, last_seen__gte=cutoff).aggregate(t=Sum("times_seen"))["t"]
Example #24
    def get_top_group_tag_values(self, project_id, group_id,
                                 environment_id, key, limit=TOP_VALUES_DEFAULT_LIMIT):
        if db.is_postgres():
            # This doesn't guarantee the percentage is accurate, but it does ensure
            # that the query has a maximum cost
            return list(
                map(
                    transformers[models.GroupTagValue],
                    models.GroupTagValue.objects.raw(
                        """
                        SELECT *
                        FROM (
                            SELECT *
                            FROM sentry_messagefiltervalue
                            WHERE group_id = %%s
                            AND key = %%s
                            ORDER BY last_seen DESC
                            LIMIT 10000
                        ) as a
                        ORDER BY times_seen DESC
                        LIMIT %d
                        """ % limit, [group_id, key]
                    )
                )
            )

        cutoff = timezone.now() - timedelta(days=7)
        return list(
            map(
                transformers[models.GroupTagValue],
                models.GroupTagValue.objects.filter(
                    group_id=group_id,
                    key=key,
                    last_seen__gte=cutoff,
                ).order_by('-times_seen')[:limit]
            )
        )
Example #25
    def bulk_delete(self, model, dtfield, days=None):
        if db.is_postgres():
            self._postgres_bulk_speed_delete(model, dtfield, days=days)
        else:
            self.generic_delete(model, dtfield, days=days)
Example #26
def bulk_delete_objects(model,
                        limit=10000,
                        transaction_id=None,
                        logger=None,
                        partition_key=None,
                        **filters):
    connection = connections[router.db_for_write(model)]
    quote_name = connection.ops.quote_name

    query = []
    params = []
    partition_query = []

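    # partition_key columns are ANDed into the outer WHERE clause (useful for
    # pruning a partitioned table), while **filters drive the inner id-selection
    # subquery that bounds each Postgres pass to `limit` rows.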
    if partition_key:
        for column, value in partition_key.items():
            partition_query.append('%s = %%s' % (quote_name(column), ))
            params.append(value)

    for column, value in filters.items():
        query.append('%s = %%s' % (quote_name(column), ))
        params.append(value)

    if db.is_postgres():
        query = """
            delete from %(table)s
            where %(partition_query)s id = any(array(
                select id
                from %(table)s
                where (%(query)s)
                limit %(limit)d
            ))
        """ % dict(
            partition_query=(' AND '.join(partition_query)) +
            (' AND ' if partition_query else ''),
            query=' AND '.join(query),
            table=model._meta.db_table,
            limit=limit,
        )
    elif db.is_mysql():
        query = """
            delete from %(table)s
            where %(partition_query)s (%(query)s)
            limit %(limit)d
        """ % dict(
            partition_query=(' AND '.join(partition_query)) +
            (' AND ' if partition_query else ''),
            query=' AND '.join(query),
            table=model._meta.db_table,
            limit=limit,
        )
    else:
        if logger is not None:
            logger.warning(
                'Using slow deletion strategy due to unknown database')
        has_more = False
        for obj in model.objects.filter(**filters)[:limit]:
            obj.delete()
            has_more = True
        return has_more

    cursor = connection.cursor()
    cursor.execute(query, params)

    has_more = cursor.rowcount > 0

    if has_more and logger is not None and _leaf_re.search(
            model.__name__) is None:
        logger.info('object.delete.bulk_executed',
                    extra=dict(filters.items() + [
                        ('model', model.__name__),
                        ('transaction_id', transaction_id),
                    ]))

    return has_more
Example #27
import pytest
from sentry.utils.db import is_postgres
from sentry.testutils import TestCase
from sentry.constants import MAX_CULPRIT_LENGTH
from django.utils.encoding import force_text


def psycopg2_version():
    import psycopg2

    version = psycopg2.__version__.split()[0].split(".")
    return tuple(map(int, version))


@pytest.mark.skipif(
    not is_postgres() or psycopg2_version() < (2, 7),
    reason="Test requires Postgres and psycopg 2.7+",
)
class CursorWrapperTestCase(TestCase):
    def test_null_bytes(self):
        from django.db import connection

        cursor = connection.cursor()
        cursor.execute("SELECT %s", [b"Ma\x00tt"])
        assert cursor.fetchone()[0] == b"Matt"
        cursor.execute("SELECT %s", [u"Ma\x00tt"])
        assert cursor.fetchone()[0] == u"Matt"

    def test_null_bytes_at_max_len_bytes(self):
        from django.db import connection
Example #28
    def execute(self, chunk_size=10000):
        if db.is_postgres():
            self.execute_postgres(chunk_size)
        else:
            self.execute_generic(chunk_size)
Example #29
def create_default_project(id, name, slug, verbosity=2, **kwargs):
    if Project.objects.filter(id=id).exists():
        return

    try:
        user = User.objects.filter(is_superuser=True)[0]
    except IndexError:
        user, _ = User.objects.get_or_create(
            username='******',
            defaults={
                'email': 'sentry@localhost',
            }
        )

    org, _ = Organization.objects.get_or_create(
        slug='sentry',
        defaults={
            'name': 'Sentry',
        }
    )

    OrganizationMember.objects.get_or_create(
        user=user,
        organization=org,
        defaults={
            'type': OrganizationMemberType.OWNER,
            'has_global_access': True,
        },
    )

    team, _ = Team.objects.get_or_create(
        organization=org,
        slug='sentry',
        defaults={
            'name': 'Sentry',
        }
    )

    project = Project.objects.create(
        id=id,
        public=False,
        name=name,
        slug=slug,
        team=team,
        organization=team.organization,
        **kwargs
    )

    # HACK: manually update the ID after insert due to Postgres
    # sequence issues. Seriously, f**k everything about this.
    if db.is_postgres(project._state.db):
        connection = connections[project._state.db]
        cursor = connection.cursor()
        cursor.execute(PROJECT_SEQUENCE_FIX)

    project.update_option('sentry:origins', ['*'])

    if verbosity > 0:
        print('Created internal Sentry project (slug=%s, id=%s)' % (project.slug, project.id))

    return project
Example #30
    def query(
        self,
        projects,
        environments=None,
        sort_by="date",
        limit=100,
        cursor=None,
        count_hits=False,
        paginator_options=None,
        search_filters=None,
        date_from=None,
        date_to=None,
    ):
        from sentry.models import Group, GroupStatus, GroupSubscription

        search_filters = search_filters if search_filters is not None else []

        # ensure projects are from same org
        if len({p.organization_id for p in projects}) != 1:
            raise RuntimeError("Cross organization search not supported")

        if paginator_options is None:
            paginator_options = {}

        group_queryset = Group.objects.filter(project__in=projects).exclude(
            status__in=[
                GroupStatus.PENDING_DELETION,
                GroupStatus.DELETION_IN_PROGRESS,
                GroupStatus.PENDING_MERGE,
            ]
        )

        qs_builder_conditions = {
            "status": QCallbackCondition(lambda status: Q(status=status)),
            "bookmarked_by": QCallbackCondition(
                lambda user: Q(bookmark_set__project__in=projects, bookmark_set__user=user)
            ),
            "assigned_to": QCallbackCondition(
                functools.partial(assigned_to_filter, projects=projects)
            ),
            "unassigned": QCallbackCondition(
                functools.partial(unassigned_filter, projects=projects)
            ),
            "subscribed_by": QCallbackCondition(
                lambda user: Q(
                    id__in=GroupSubscription.objects.filter(
                        project__in=projects, user=user, is_active=True
                    ).values_list("group")
                )
            ),
            "active_at": ScalarCondition("active_at"),
        }

        message = [
            search_filter for search_filter in search_filters if search_filter.key.name == "message"
        ]
        if message and message[0].value.raw_value:
            message = message[0]
            # We only support full wildcard matching in postgres
            if is_postgres() and message.value.is_wildcard():
                group_queryset = message_regex_filter(group_queryset, message)
            else:
                # Otherwise, use the standard LIKE query
                qs_builder_conditions["message"] = QCallbackCondition(
                    lambda message: Q(Q(message__icontains=message) | Q(culprit__icontains=message))
                )

        group_queryset = QuerySetBuilder(qs_builder_conditions).build(
            group_queryset, search_filters
        )
        # filter out groups which are beyond the retention period
        retention = quotas.get_event_retention(organization=projects[0].organization)
        if retention:
            retention_window_start = timezone.now() - timedelta(days=retention)
        else:
            retention_window_start = None
        # TODO: This could be optimized when building querysets to identify
        # criteria that are logically impossible (e.g. if the upper bound
        # for last seen is before the retention window starts, no results
        # exist.)
        if retention_window_start:
            group_queryset = group_queryset.filter(last_seen__gte=retention_window_start)

        # This is a punt because the SnubaSearchBackend (a subclass) shares so much that it
        # seemed better to handle all the shared initialization and then hand off to the
        # actual backend.
        return self._query(
            projects,
            retention_window_start,
            group_queryset,
            environments,
            sort_by,
            limit,
            cursor,
            count_hits,
            paginator_options,
            search_filters,
            date_from,
            date_to,
        )