Example #1
def bulk_delete_objects(model, limit=10000, transaction_id=None, logger=None, **filters):
    connection = connections[router.db_for_write(model)]
    quote_name = connection.ops.quote_name

    query = []
    params = []
    for column, value in filters.items():
        query.append('%s = %%s' % (quote_name(column), ))
        params.append(value)

    if db.is_postgres():
        query = """
            delete from %(table)s
            where id = any(array(
                select id
                from %(table)s
                where (%(query)s)
                limit %(limit)d
            ))
        """ % dict(
            query=' AND '.join(query),
            table=model._meta.db_table,
            limit=limit,
        )
    elif db.is_mysql():
        query = """
            delete from %(table)s
            where (%(query)s)
            limit %(limit)d
        """ % dict(
            query=' AND '.join(query),
            table=model._meta.db_table,
            limit=limit,
        )
    else:
        if logger is not None:
            logger.warning('Using slow deletion strategy due to unknown database')
        has_more = False
        for obj in model.objects.filter(**filters)[:limit]:
            obj.delete()
            has_more = True
        return has_more

    cursor = connection.cursor()
    cursor.execute(query, params)

    has_more = cursor.rowcount > 0

    if has_more and logger is not None and _leaf_re.search(model.__name__) is None:
        logger.info(
            'object.delete.bulk_executed',
            extra=dict(
                filters,
                model=model.__name__,
                transaction_id=transaction_id,
            )
        )

    return has_more
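bulk_delete_objects deletes at most `limit` rows per call and returns whether more matching rows may remain, so callers typically drive it in a loop. A minimal usage sketch, with the `Group` model and the helper name chosen only for illustration:

# Hedged usage sketch: chunked deletion driven by the return value.
# `Group` and `delete_project_groups` are illustrative, not part of the
# example above.
from sentry.models import Group

def delete_project_groups(project_id, transaction_id=None):
    while bulk_delete_objects(
        Group,
        limit=1000,
        transaction_id=transaction_id,
        project_id=project_id,
    ):
        pass  # keep going while more matching rows may remain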
Example #3
def import_system_symbols(bundles, threads, trim_symbols, no_demangle):
    """Imports system symbols from preprocessed zip files into Sentry.

    It takes a list of zip files as arguments that contain preprocessed
    system symbol information as JSON dumps.  The actual zipped up dSYM
    files cannot be used here; they need to be preprocessed first.
    """
    import zipfile
    from sentry.utils.db import is_mysql
    if threads != 1 and is_mysql():
        warnings.warn(Warning('disabled threading for mysql'))
        threads = 1
    for path in bundles:
        with zipfile.ZipFile(path) as f:
            sdk_info = json.load(f.open('sdk_info'))
            label = ('%s.%s.%s (%s)' % (
                sdk_info['version_major'],
                sdk_info['version_minor'],
                sdk_info['version_patchlevel'],
                sdk_info['version_build'],
            )).ljust(18)
            with click.progressbar(f.namelist(), label=label) as bar:
                process_archive(bar, f, sdk_info, threads,
                                trim_symbols=trim_symbols,
                                demangle=not no_demangle)
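Since the importer drives a `click.progressbar`, it reads like the body of a CLI command. A minimal sketch of how it could be wired up with click; the command name and options below are assumptions, not Sentry's actual CLI definition:

# Hedged sketch: exposing the importer as a click command.  The command and
# option names are illustrative only.
import click

@click.command(name='import-system-symbols')
@click.argument('bundles', type=click.Path(exists=True), nargs=-1)
@click.option('--threads', default=8, help='Number of worker threads.')
@click.option('--trim-symbols', is_flag=True, help='Store trimmed symbol names.')
@click.option('--no-demangle', is_flag=True, help='Skip demangling of symbol names.')
def import_system_symbols_command(bundles, threads, trim_symbols, no_demangle):
    import_system_symbols(bundles, threads, trim_symbols, no_demangle)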
Example #4
def bulk_delete_objects(model,
                        group_id=None,
                        project_id=None,
                        limit=10000,
                        logger=None):
    assert group_id or project_id, 'Must pass either project_id or group_id'

    if group_id:
        column = 'group_id'
        value = group_id

    elif project_id:
        column = 'project_id'
        value = project_id

    connection = connections['default']
    quote_name = connection.ops.quote_name

    if logger is not None:
        logger.info('Removing %r objects where %s=%r', model, column, value)

    if db.is_postgres():
        query = """
            delete from %(table)s
            where id = any(array(
                select id
                from %(table)s
                where %(column)s = %%s
                limit %(limit)d
            ))
        """ % dict(
            table=model._meta.db_table,
            column=quote_name(column),
            limit=limit,
        )
        params = [value]
    elif db.is_mysql():
        query = """
            delete from %(table)s
            where %(column)s = %%s
            limit %(limit)d
        """ % dict(
            table=model._meta.db_table,
            column=quote_name(column),
            limit=limit,
        )
        params = [value]
    else:
        if logger is not None:
            logger.warning('Using slow deletion strategy due to unknown database')
        has_more = False
        for obj in model.objects.filter(**{column: value})[:limit]:
            obj.delete()
            has_more = True
        return has_more

    cursor = connection.cursor()
    cursor.execute(query, params)
    return cursor.rowcount > 0
Example #5
def increment_project_counter(project, delta=1):
    """This method primarily exists so that south code can use it."""
    if delta <= 0:
        raise ValueError('There is only one way, and that\'s up.')

    cur = connection.cursor()
    try:
        if is_postgres():
            cur.execute(
                '''
                select sentry_increment_project_counter(%s, %s)
            ''', [project.id, delta]
            )
            return cur.fetchone()[0]
        elif is_sqlite():
            # make sure a counter row exists for this project
            cur.execute(
                '''
                insert or ignore into sentry_projectcounter
                  (project_id, value) values (%s, 0);
            ''', [project.id]
            )
            value = cur.execute(
                '''
                select value from sentry_projectcounter
                 where project_id = %s
            ''', [project.id]
            ).fetchone()[0]
            while 1:
                cur.execute(
                    '''
                    update sentry_projectcounter
                       set value = value + %s
                     where project_id = %s;
                ''', [delta, project.id]
                )
                changes = cur.execute(
                    '''
                    select changes();
                '''
                ).fetchone()[0]
                if changes != 0:
                    return value + delta
        elif is_mysql():
            cur.execute(
                '''
                insert into sentry_projectcounter
                            (project_id, value)
                     values (%s, @new_val := %s)
           on duplicate key
                     update value = @new_val := value + %s
            ''', [project.id, delta, delta]
            )
            cur.execute('select @new_val')
            return cur.fetchone()[0]
        else:
            raise AssertionError("Not implemented database engine path")
    finally:
        cur.close()
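A hedged usage sketch for the counter: each call reserves the next per-project value, which a caller might use to assign something like a per-project short id. The helper and the `short_id` field below are illustrative, not Sentry's actual call site:

# Hedged usage sketch: consuming the per-project counter.
def assign_short_id(group):
    group.short_id = increment_project_counter(group.project)
    group.save(update_fields=['short_id'])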
Example #6
def bulk_delete_objects(model, group_id=None, project_id=None, limit=10000,
                        logger=None):
    assert group_id or project_id, 'Must pass either project_id or group_id'

    if group_id:
        column = 'group_id'
        value = group_id

    elif project_id:
        column = 'project_id'
        value = project_id

    connection = connections['default']
    quote_name = connection.ops.quote_name

    if logger is not None:
        logger.info('Removing %r objects where %s=%r', model, column, value)

    if db.is_postgres():
        query = """
            delete from %(table)s
            where id = any(array(
                select id
                from %(table)s
                where %(column)s = %%s
                limit %(limit)d
            ))
        """ % dict(
            table=model._meta.db_table,
            column=quote_name(column),
            limit=limit,
        )
        params = [value]
    elif db.is_mysql():
        query = """
            delete from %(table)s
            where %(column)s = %%s
            limit %(limit)d
        """ % dict(
            table=model._meta.db_table,
            column=quote_name(column),
            limit=limit,
        )
        params = [value]
    else:
        if logger is not None:
            logger.warning('Using slow deletion strategy due to unknown database')
        has_more = False
        for obj in model.objects.filter(**{column: value})[:limit]:
            obj.delete()
            has_more = True
        return has_more

    cursor = connection.cursor()
    cursor.execute(query, params)
    return cursor.rowcount > 0
Example #7
def bulk_delete_objects(model,
                        limit=10000,
                        logger=None,
                        using='default',
                        **filters):
    connection = connections[using]
    quote_name = connection.ops.quote_name

    query = []
    params = []
    for column, value in filters.items():
        query.append('%s = %%s' % (quote_name(column), ))
        params.append(value)

    if logger is not None:
        logger.info('Removing %r objects where %s=%r', model, column, value)

    if db.is_postgres():
        query = """
            delete from %(table)s
            where id = any(array(
                select id
                from %(table)s
                where (%(query)s)
                limit %(limit)d
            ))
        """ % dict(
            query=' AND '.join(query),
            table=model._meta.db_table,
            limit=limit,
        )
    elif db.is_mysql():
        query = """
            delete from %(table)s
            where (%(query)s)
            limit %(limit)d
        """ % dict(
            query=' AND '.join(query),
            table=model._meta.db_table,
            limit=limit,
        )
    else:
        if logger is not None:
            logger.warning(
                'Using slow deletion strategy due to unknown database')
        has_more = False
        for obj in model.objects.filter(**filters)[:limit]:
            obj.delete()
            has_more = True
        return has_more

    cursor = connection.cursor()
    cursor.execute(query, params)
    return cursor.rowcount > 0
Example #8
def increment_project_counter(project, delta=1):
    """This method primarily exists so that south code can use it."""
    if delta <= 0:
        raise ValueError('There is only one way, and that\'s up.')

    cur = connection.cursor()
    try:
        if is_postgres():
            cur.execute(
                '''
                select sentry_increment_project_counter(%s, %s)
            ''', [project.id, delta])
            return cur.fetchone()[0]
        elif is_sqlite():
            # make sure a counter row exists for this project
            cur.execute(
                '''
                insert or ignore into sentry_projectcounter
                  (project_id, value) values (%s, 0);
            ''', [project.id])
            value = cur.execute(
                '''
                select value from sentry_projectcounter
                 where project_id = %s
            ''', [project.id]).fetchone()[0]
            while 1:
                cur.execute(
                    '''
                    update sentry_projectcounter
                       set value = value + %s
                     where project_id = %s;
                ''', [delta, project.id])
                changes = cur.execute('''
                    select changes();
                ''').fetchone()[0]
                if changes != 0:
                    return value + delta
        elif is_mysql():
            cur.execute(
                '''
                insert into sentry_projectcounter
                            (project_id, value)
                     values (%s, @new_val := %s)
           on duplicate key
                     update value = @new_val := value + %s
            ''', [project.id, delta, delta])
            cur.execute('select @new_val')
            return cur.fetchone()[0]
        else:
            raise AssertionError("Not implemented database engine path")
    finally:
        cur.close()
Example #9
def bulk_delete_objects(model, limit=10000, logger=None, using='default',
                        **filters):
    connection = connections[using]
    quote_name = connection.ops.quote_name

    query = []
    params = []
    for column, value in filters.items():
        query.append('%s = %%s' % (quote_name(column),))
        params.append(value)

    if logger is not None:
        logger.info('Removing %r objects where %s=%r', model, column, value)

    if db.is_postgres():
        query = """
            delete from %(table)s
            where id = any(array(
                select id
                from %(table)s
                where (%(query)s)
                limit %(limit)d
            ))
        """ % dict(
            query=' AND '.join(query),
            table=model._meta.db_table,
            limit=limit,
        )
    elif db.is_mysql():
        query = """
            delete from %(table)s
            where (%(query)s)
            limit %(limit)d
        """ % dict(
            query=' AND '.join(query),
            table=model._meta.db_table,
            limit=limit,
        )
    else:
        if logger is not None:
            logger.warning('Using slow deletion strategy due to unknown database')
        has_more = False
        for obj in model.objects.filter(**filters)[:limit]:
            obj.delete()
            has_more = True
        return has_more

    cursor = connection.cursor()
    cursor.execute(query, params)
    return cursor.rowcount > 0
Example #10
def bulk_delete_objects(model, limit=10000,
                        logger=None, **filters):
    assert len(filters) == 1, 'Must pass a single column=value filter.'

    column, value = list(filters.items())[0]

    connection = connections['default']
    quote_name = connection.ops.quote_name

    if logger is not None:
        logger.info('Removing %r objects where %s=%r', model, column, value)

    if db.is_postgres():
        query = """
            delete from %(table)s
            where id = any(array(
                select id
                from %(table)s
                where %(column)s = %%s
                limit %(limit)d
            ))
        """ % dict(
            table=model._meta.db_table,
            column=quote_name(column),
            limit=limit,
        )
        params = [value]
    elif db.is_mysql():
        query = """
            delete from %(table)s
            where %(column)s = %%s
            limit %(limit)d
        """ % dict(
            table=model._meta.db_table,
            column=quote_name(column),
            limit=limit,
        )
        params = [value]
    else:
        if logger is not None:
            logger.warning('Using slow deletion strategy due to unknown database')
        has_more = False
        for obj in model.objects.filter(**{column: value})[:limit]:
            obj.delete()
            has_more = True
        return has_more

    cursor = connection.cursor()
    cursor.execute(query, params)
    return cursor.rowcount > 0
Example #11
            def get_child_relations(self, instance):
                # in bulk
                model_list = (models.GroupTagValue, models.GroupTagKey, models.TagValue)

                # required to deal with custom SQL queries and the ORM
                # in `bulk_delete_objects`
                key_id_field_name = 'key_id' if (db.is_postgres() or db.is_mysql()) else '_key_id'

                relations = [
                    ModelRelation(m, query={
                        'project_id': instance.project_id,
                        key_id_field_name: instance.id,
                    }, partition_key={'project_id': instance.project_id}) for m in model_list
                ]
                return relations
Example #13
def remove_excess_recent_searches(organization, user, search_type):
    """
    Remove any excess recent searches. We do this by sorting by `last_seen`
    descending and removing any rows after the `MAX_RECENT_SEARCHES` row. In
    practice this should only be removing a single row at most.
    """
    recent_searches_to_remove = RecentSearch.objects.filter(
        organization=organization,
        user=user,
        type=search_type,
    ).order_by('-last_seen')[MAX_RECENT_SEARCHES:]
    if is_mysql():
        # MySQL doesn't support LIMIT in this kind of subquery
        recent_searches_to_remove = list(
            recent_searches_to_remove.values_list("id", flat=True))
    RecentSearch.objects.filter(id__in=recent_searches_to_remove).delete()
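The `is_mysql()` branch works around MySQL's rejection of `LIMIT` inside `IN (...)` subqueries by materializing the sliced queryset into a plain list of ids first. The same workaround generalizes to any sliced queryset fed to `id__in`; a minimal sketch, with the `last_seen` ordering and the `keep` count assumed for illustration:

# Hedged sketch of the general workaround: MySQL rejects
# "LIMIT & IN/ALL/ANY/SOME subquery", so evaluate the sliced queryset into a
# list of ids before using it in id__in.
def delete_all_but_newest(queryset, keep):
    excess = queryset.order_by('-last_seen')[keep:]
    if is_mysql():
        excess = list(excess.values_list('id', flat=True))
    queryset.model.objects.filter(id__in=excess).delete()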
Example #14
def scoreclause_sql(sc, connection):
    db = getattr(connection, 'alias', 'default')
    has_values = sc.last_seen is not None and sc.times_seen is not None
    if is_postgres(db):
        if has_values:
            sql = 'log(times_seen + %d) * 600 + %d' % (sc.times_seen, to_timestamp(sc.last_seen))
        else:
            sql = 'log(times_seen) * 600 + last_seen::abstime::int'
    elif is_mysql(db):
        if has_values:
            sql = 'log(times_seen + %d) * 600 + %d' % (sc.times_seen, to_timestamp(sc.last_seen))
        else:
            sql = 'log(times_seen) * 600 + unix_timestamp(last_seen)'
    else:
        # XXX: if we can't do it atomically, let's do the best we can
        sql = int(sc)

    return (sql, [])
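Both database branches encode the same score, roughly `log(times_seen) * 600 + <last_seen as a unix timestamp>`; only the timestamp conversion differs between Postgres and MySQL. A small sketch computing the equivalent value in Python, assuming `to_timestamp` converts a datetime to a unix timestamp as it does above:

# Hedged sketch: the score the SQL expressions above compute, evaluated in
# Python for comparison or testing.
import math

def score_value(times_seen, last_seen):
    return math.log(times_seen) * 600 + to_timestamp(last_seen)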
Example #15
def scoreclause_sql(sc, connection):
    db = getattr(connection, 'alias', 'default')
    has_values = sc.last_seen is not None and sc.times_seen is not None
    if is_postgres(db):
        if has_values:
            sql = 'log(times_seen + %d) * 600 + %d' % (
                sc.times_seen, to_timestamp(sc.last_seen))
        else:
            sql = 'log(times_seen) * 600 + last_seen::abstime::int'
    elif is_mysql(db):
        if has_values:
            sql = 'log(times_seen + %d) * 600 + %d' % (
                sc.times_seen, to_timestamp(sc.last_seen))
        else:
            sql = 'log(times_seen) * 600 + unix_timestamp(last_seen)'
    else:
        # XXX: if we can't do it atomically, let's do the best we can
        sql = int(sc)

    return (sql, [])
Example #16
class DateTimePaginatorTest(TestCase):
    def test_ascending(self):
        joined = timezone.now()

        # The DateTime pager only has accuracy up to 1000th of a second.
        # Everything can't be added within less than 10 microseconds of each
        # other. This is handled by the pager (see test_rounding_offset), but
        # this case shouldn't rely on it.
        res1 = self.create_user('*****@*****.**', date_joined=joined)
        res2 = self.create_user('*****@*****.**',
                                date_joined=joined + timedelta(seconds=1))
        res3 = self.create_user('*****@*****.**',
                                date_joined=joined + timedelta(seconds=2))
        res4 = self.create_user('*****@*****.**',
                                date_joined=joined + timedelta(seconds=3))

        queryset = User.objects.all()

        paginator = DateTimePaginator(queryset, 'date_joined')
        result1 = paginator.get_result(limit=2, cursor=None)
        assert len(result1) == 2, result1
        assert result1[0] == res1
        assert result1[1] == res2
        assert result1.next
        assert not result1.prev

        result2 = paginator.get_result(limit=2, cursor=result1.next)
        assert len(result2) == 2, result2
        assert result2[0] == res3
        assert result2[1] == res4
        assert not result2.next
        assert result2.prev

        result3 = paginator.get_result(limit=1, cursor=result2.prev)
        assert len(result3) == 1, result3
        assert result3[0] == res2
        assert result3.next
        assert result3.prev

        result4 = paginator.get_result(limit=1, cursor=result3.prev)
        assert len(result4) == 1, result4
        assert result4[0] == res1
        assert result4.next
        assert not result4.prev

    def test_descending(self):
        joined = timezone.now()

        res1 = self.create_user('*****@*****.**', date_joined=joined)
        res2 = self.create_user('*****@*****.**',
                                date_joined=joined + timedelta(seconds=1))
        res3 = self.create_user('*****@*****.**',
                                date_joined=joined + timedelta(seconds=2))

        queryset = User.objects.all()

        paginator = DateTimePaginator(queryset, '-date_joined')
        result1 = paginator.get_result(limit=1, cursor=None)
        assert len(result1) == 1, result1
        assert result1[0] == res3
        assert result1.next
        assert not result1.prev

        result2 = paginator.get_result(limit=2, cursor=result1.next)
        assert len(result2) == 2, result2
        assert result2[0] == res2
        assert result2[1] == res1
        assert not result2.next
        assert result2.prev

        result3 = paginator.get_result(limit=2, cursor=result2.prev)
        assert len(result3) == 1, result3
        assert result3[0] == res3
        assert result3.next
        assert not result3.prev

    def test_prev_descending_with_new(self):
        joined = timezone.now()

        res1 = self.create_user('*****@*****.**', date_joined=joined)
        res2 = self.create_user('*****@*****.**',
                                date_joined=joined + timedelta(seconds=1))

        queryset = User.objects.all()

        paginator = DateTimePaginator(queryset, '-date_joined')
        result1 = paginator.get_result(limit=10, cursor=None)
        assert len(result1) == 2, result1
        assert result1[0] == res2
        assert result1[1] == res1

        res3 = self.create_user('*****@*****.**',
                                date_joined=joined + timedelta(seconds=2))
        res4 = self.create_user('*****@*****.**',
                                date_joined=joined + timedelta(seconds=3))

        result2 = paginator.get_result(limit=10, cursor=result1.prev)
        assert len(result2) == 2, result2
        assert result2[0] == res4
        assert result2[1] == res3

        result3 = paginator.get_result(limit=10, cursor=result2.prev)
        assert len(result3) == 0, result3

        result4 = paginator.get_result(limit=10, cursor=result1.next)
        assert len(result4) == 0, result4

    @pytest.mark.skipif(is_mysql(),
                        reason='MySQL does not support sub-second accuracy')
    def test_rounding_offset(self):
        joined = timezone.now()

        res1 = self.create_user('*****@*****.**', date_joined=joined)
        res2 = self.create_user('*****@*****.**',
                                date_joined=joined + timedelta(microseconds=1))
        res3 = self.create_user('*****@*****.**',
                                date_joined=joined + timedelta(microseconds=2))
        res4 = self.create_user('*****@*****.**',
                                date_joined=joined + timedelta(microseconds=3))

        queryset = User.objects.all()

        paginator = DateTimePaginator(queryset, 'date_joined')
        result1 = paginator.get_result(limit=3, cursor=None)
        assert len(result1) == 3, result1
        assert result1[0] == res1
        assert result1[1] == res2
        assert result1[2] == res3

        result2 = paginator.get_result(limit=10, cursor=result1.next)
        assert len(result2) == 1, result2
        assert result2[0] == res4

        result3 = paginator.get_result(limit=2, cursor=result2.prev)
        assert len(result3) == 2, result3
        assert result3[0] == res2
        assert result3[1] == res3

        result4 = paginator.get_result(limit=1, cursor=result3.prev)
        assert len(result4) == 1, result4
        assert result4[0] == res1

        result5 = paginator.get_result(limit=10, cursor=result4.prev)
        assert len(result5) == 0, list(result5)