class PopLeftListF(BaseExpression):
    """Expression removing the first comma-separated element from a list
    field: 'a,b,c' -> 'b,c'; a zero/one-element list becomes ''."""

    # If a comma exists, return everything after the first one; otherwise
    # popping the left element leaves the empty string. MySQL user variables
    # avoid evaluating the field expression twice.
    sql_expression = collapse_spaces("""
        IF(
            (@tmp_c:=LOCATE(',', @tmp_f:=%s)) > 0,
            SUBSTRING(@tmp_f, @tmp_c + 1),
            ''
        )
    """)

    def __init__(self, lhs):
        # Bug fix: the original called super(BaseExpression, self), which
        # starts the MRO *after* BaseExpression and so skips
        # BaseExpression.__init__ entirely. Name the current class.
        super(PopLeftListF, self).__init__()
        self.lhs = lhs

    def get_source_expressions(self):
        return [self.lhs]

    def set_source_expressions(self, exprs):
        self.lhs = exprs[0]

    def as_sql(self, compiler, connection):
        """Compile the wrapped field and splice it into the SQL template."""
        field, field_params = compiler.compile(self.lhs)

        sql = self.sql_expression % (field,)
        return sql, field_params
class PopListF(BaseExpression):
    """Expression removing the last comma-separated element from a list
    field: 'a,b,c' -> 'a,b'; a zero/one-element list becomes ''."""

    # Keep the prefix up to (but not including) the last comma; if there is
    # no comma the result length is 0, i.e. the empty string. @tmp_f caches
    # the field so it is evaluated once.
    sql_expression = collapse_spaces("""
        SUBSTRING(
            @tmp_f:=%s,
            1,
            IF(
                LOCATE(',', @tmp_f),
                (
                    CHAR_LENGTH(@tmp_f) -
                    CHAR_LENGTH(SUBSTRING_INDEX(@tmp_f, ',', -1)) -
                    1
                ),
                0
            )
        )
    """)

    def __init__(self, lhs):
        # Bug fix: the original called super(BaseExpression, self), which
        # starts the MRO *after* BaseExpression and so skips
        # BaseExpression.__init__ entirely. Name the current class.
        super(PopListF, self).__init__()
        self.lhs = lhs

    def get_source_expressions(self):
        return [self.lhs]

    def set_source_expressions(self, exprs):
        self.lhs = exprs[0]

    def as_sql(self, compiler, connection):
        """Compile the wrapped field and splice it into the SQL template."""
        field, field_params = compiler.compile(self.lhs)

        sql = self.sql_expression % (field,)
        return sql, field_params
Esempio n. 3
0
class PopLeftListF(BaseExpression):
    """Drops the first comma-separated element from a list field."""

    # When a comma is found, keep everything after the first one; otherwise
    # popping the only (or no) element yields ''. @tmp_* variables cache the
    # sub-expressions so each is evaluated once.
    sql_expression = collapse_spaces("""
        IF(
            (@tmp_c:=LOCATE(',', @tmp_f:=%s)) > 0,
            SUBSTRING(@tmp_f, @tmp_c + 1),
            ''
        )
    """)

    def __init__(self, lhs: BaseExpression) -> None:
        super().__init__()
        self.lhs = lhs

    def get_source_expressions(self) -> list[BaseExpression]:
        return [self.lhs]

    def set_source_expressions(self, exprs: Iterable[BaseExpression]) -> None:
        [self.lhs] = exprs

    def as_sql(
        self,
        compiler: SQLCompiler,
        connection: BaseDatabaseWrapper,
    ) -> tuple[str, tuple[Any, ...]]:
        """Compile the wrapped field and interpolate it into the template."""
        compiled, compiled_params = compiler.compile(self.lhs)
        return self.sql_expression % (compiled), tuple(compiled_params)
Esempio n. 4
0
class Command(BaseCommand):
    args = "<optional cache aliases>"

    help = collapse_spaces("""
        Runs cache.cull() on all your MySQLCache caches, or only those
        specified aliases.
    """)

    def handle(self, *aliases, **options):
        """Cull expired entries from the named caches (or all configured)."""
        verbosity = options.get('verbosity')

        names = set(aliases) if aliases else settings.CACHES

        for alias in names:
            try:
                cache = caches[alias]
            except InvalidCacheBackendError:
                raise CommandError("Cache '{}' does not exist".format(alias))

            # Only MySQLCache backends can be culled this way.
            if not isinstance(cache, MySQLCache):  # pragma: no cover
                continue

            if verbosity >= 1:
                self.stdout.write("Deleting from cache '{}'... ".format(alias),
                                  ending='')
            deleted = cache.cull()
            if verbosity >= 1:
                self.stdout.write("{} entries deleted.".format(deleted))
Esempio n. 5
0
class AddSetF(TwoSidedExpression):
    """Adds a value to a comma-separated set field if not already present."""

    # A slightly complicated expression.
    # basically if 'value' is not in the set, concat the current set with a
    # comma and 'value'
    # N.B. using MySQL side variables to avoid repeat calculation of
    # expression[s]
    sql_expression = collapse_spaces("""
        IF(
            FIND_IN_SET(@tmp_val:=%s, @tmp_f:=%s),
            @tmp_f,
            CONCAT_WS(
                ',',
                IF(CHAR_LENGTH(@tmp_f), @tmp_f, NULL),
                @tmp_val
            )
        )
    """)

    def as_sql(self, compiler, connection):
        """Value placeholder precedes the field in the SQL, so value params
        come first in the bound-parameter sequence too."""
        field_sql, field_params = compiler.compile(self.lhs)
        value_sql, value_params = compiler.compile(self.rhs)

        return (
            self.sql_expression % (value_sql, field_sql),
            tuple(value_params) + tuple(field_params),
        )
class AppendLeftListF(TwoSidedExpression):
    """Prepends a value to a comma-separated list field."""

    # CONCAT_WS skips NULL arguments, so an empty existing list collapses to
    # just the new value with no trailing comma. @tmp_f caches the field so
    # it is evaluated only once.
    sql_expression = collapse_spaces("""
        CONCAT_WS(
            ',',
            %s,
            IF(
                (@tmp_f:=%s) > '',
                @tmp_f,
                NULL
            )
        )
    """)

    def as_sql(self, compiler, connection):
        """Interpolate value then field; bind parameters in the same order."""
        field, field_params = compiler.compile(self.lhs)
        value, value_params = compiler.compile(self.rhs)

        sql = self.sql_expression % (value, field)

        # Bug fix: the placeholders appear value-first, field-second in the
        # SQL, so bound params must follow that order. The original extended
        # field_params first, mismatching params with their placeholders
        # whenever either side carried parameters.
        params = []
        params.extend(value_params)
        params.extend(field_params)

        return sql, params
Esempio n. 7
0
class AppendListF(TwoSidedExpression):
    """Appends a value to a comma-separated list field."""

    # CONCAT_WS skips NULL arguments, so an empty existing list collapses to
    # just the new value with no leading comma. @tmp_f caches the field so
    # it is evaluated only once.
    sql_expression = collapse_spaces("""
        CONCAT_WS(
            ',',
            IF(
                (@tmp_f:=%s) > '',
                @tmp_f,
                NULL
            ),
            %s
        )
    """)

    def as_sql(
        self,
        compiler: SQLCompiler,
        connection: BaseDatabaseWrapper,
    ) -> tuple[str, tuple[Any, ...]]:
        """Interpolate field then value; bind parameters in the same order."""
        field, field_params = compiler.compile(self.lhs)
        value, value_params = compiler.compile(self.rhs)

        sql = self.sql_expression % (field, value)
        # Bug fix: the placeholders appear field-first, value-second in the
        # SQL, so the bound parameters must be field_params followed by
        # value_params; the original concatenated them the other way round.
        params = tuple(field_params) + tuple(value_params)

        return sql, params
Esempio n. 8
0
class Command(BaseCommand):
    args = "<app_name>"

    help = collapse_spaces("""
        Outputs a migration that will create a table.
    """)

    if django.VERSION[:2] >= (1, 10):

        def add_arguments(self, parser):
            parser.add_argument(
                'aliases',
                metavar='aliases',
                nargs='*',
                help='Specify the cache alias(es) to create migrations for.',
            )

    def handle(self, *args, **options):
        """Print a migration creating the table for each MySQLCache alias."""
        # Django >= 1.10 delivers positionals via the argument parser; older
        # versions pass them through *args.
        if django.VERSION[:2] >= (1, 10):
            aliases = set(options['aliases'])
        else:
            aliases = set(args)

        if not aliases:
            aliases = settings.CACHES

        tables = set()
        for alias in aliases:
            try:
                cache = caches[alias]
            except InvalidCacheBackendError:
                raise CommandError("Cache '{}' does not exist".format(alias))

            if not isinstance(cache, MySQLCache):  # pragma: no cover
                continue

            tables.add(cache._table)

        if not tables:
            self.stderr.write("No MySQLCache instances in CACHES")
            return

        self.stdout.write(self.render_migration(tables))

    def render_migration(self, tables):
        # A Django template would be natural here, but the user may not have
        # the django template engine configured in TEMPLATES, so assemble the
        # text by hand.
        parts = [header]
        parts.extend(
            table_operation.replace('{{ table }}', table) for table in tables
        )
        parts.append(footer)
        return ''.join(parts)
Esempio n. 9
0
def strict_mode_warning(alias):
    """Build the W001 system-check warning for a connection without MySQL
    Strict Mode enabled."""
    hint = collapse_spaces("""
        MySQL's Strict Mode fixes many data integrity problems in MySQL, such
        as data truncation upon insertion, by escalating warnings into errors.
        It is strongly recommended you activate it. See:
        https://django-mysql.readthedocs.io/en/latest/checks.html#django-mysql-w001-strict-mode
    """)
    return Warning(
        "MySQL Strict Mode is not set for database connection '{}'".format(
            alias),
        hint=hint,
        id='django_mysql.W001',
    )
Esempio n. 10
0
def strict_mode_warning(alias):
    """Build the W001 system-check warning for a connection without MySQL
    Strict Mode enabled."""
    message = "MySQL Strict Mode is not set for database connection '{}'"
    # Fixed the hint link: plain-HTTP readthedocs.org is outdated; the docs
    # are served over HTTPS at readthedocs.io (matching the other copy of
    # this warning elsewhere in the project).
    hint = collapse_spaces("""
        MySQL's Strict Mode fixes many data integrity problems in MySQL, such
        as data truncation upon insertion, by escalating warnings into errors.
        It is strongly recommended you activate it. See:
        https://django-mysql.readthedocs.io/en/latest/checks.html#django-mysql-w001-strict-mode
    """)
    return Warning(
        message.format(alias),
        hint=hint,
        id='django_mysql.W001',
    )
Esempio n. 11
0
def utf8mb4_warning(alias):
    """Build the W003 system-check warning for a connection whose character
    set is not utf8mb4."""
    hint = collapse_spaces("""
        The default 'utf8' character set does not include support for all
        Unicode characters. It's strongly recommended you move to use
        'utf8mb4'. See:
        https://django-mysql.readthedocs.io/en/latest/checks.html#django-mysql-w003-utf8mb4
    """)

    return Warning(
        "The character set is not utf8mb4 for database connection '{}'".format(
            alias),
        hint=hint,
        id='django_mysql.W003',
    )
Esempio n. 12
0
def innodb_strict_mode_warning(alias):
    """Build the W002 system-check warning for a connection without InnoDB
    Strict Mode enabled."""
    hint = collapse_spaces("""
        InnoDB Strict Mode escalates several warnings around InnoDB-specific
        statements into errors. It's recommended you activate this, but it's
        not very likely to affect you if you don't. See:
        https://django-mysql.readthedocs.io/en/latest/checks.html#django-mysql-w002-innodb-strict-mode
    """)

    return Warning(
        "InnoDB Strict Mode is not set for database connection '{}'".format(
            alias),
        hint=hint,
        id='django_mysql.W002',
    )
Esempio n. 13
0
def utf8mb4_warning(alias):
    """Build the W003 system-check warning for a connection whose character
    set is not utf8mb4."""
    message = "The character set is not utf8mb4 for database connection '{}'"
    # Fixed the hint link: plain-HTTP readthedocs.org is outdated; the docs
    # are served over HTTPS at readthedocs.io (matching the other copy of
    # this warning elsewhere in the project).
    hint = collapse_spaces("""
        The default 'utf8' character set does not include support for all
        Unicode characters. It's strongly recommended you move to use
        'utf8mb4'. See:
        https://django-mysql.readthedocs.io/en/latest/checks.html#django-mysql-w003-utf8mb4
    """)

    return Warning(
        message.format(alias),
        hint=hint,
        id='django_mysql.W003',
    )
Esempio n. 14
0
def innodb_strict_mode_warning(alias):
    """Build the W002 system-check warning for a connection without InnoDB
    Strict Mode enabled."""
    message = "InnoDB Strict Mode is not set for database connection '{}'"
    # Fixed the hint link: plain-HTTP readthedocs.org is outdated; the docs
    # are served over HTTPS at readthedocs.io (matching the other copy of
    # this warning elsewhere in the project).
    hint = collapse_spaces("""
        InnoDB Strict Mode escalates several warnings around InnoDB-specific
        statements into errors. It's recommended you activate this, but it's
        not very likely to affect you if you don't. See:
        https://django-mysql.readthedocs.io/en/latest/checks.html#django-mysql-w002-innodb-strict-mode
    """)

    return Warning(
        message.format(alias),
        hint=hint,
        id='django_mysql.W002',
    )
Esempio n. 15
0
class RemoveSetF(TwoSidedExpression):
    """Removes a single value from a comma-separated set field; if the value
    is not in the set, the field is returned unchanged."""

    # Wow, this is a real doozy of an expression.
    # Basically, if it IS in the set, cut the string up to be everything except
    # that element.
    # There are some tricks going on - e.g. LEAST to evaluate a sub expression
    # but not use it in the output of CONCAT_WS
    sql_expression = collapse_spaces("""
        IF(
            @tmp_pos:=FIND_IN_SET(%s, @tmp_f:=%s),
            CONCAT_WS(
                ',',
                LEAST(
                    @tmp_len:=(
                        CHAR_LENGTH(@tmp_f) -
                        CHAR_LENGTH(REPLACE(@tmp_f, ',', '')) +
                        IF(CHAR_LENGTH(@tmp_f), 1, 0)
                    ),
                    NULL
                ),
                CASE WHEN
                    (@tmp_before:=SUBSTRING_INDEX(@tmp_f, ',', @tmp_pos - 1))
                    = ''
                    THEN NULL
                    ELSE @tmp_before
                END,
                CASE WHEN
                    (@tmp_after:=
                        SUBSTRING_INDEX(@tmp_f, ',', - (@tmp_len - @tmp_pos)))
                    = ''
                    THEN NULL
                    ELSE @tmp_after
                END
            ),
            @tmp_f
        )
    """)

    def as_sql(
        self,
        compiler: SQLCompiler,
        connection: BaseDatabaseWrapper,
    ) -> tuple[str, tuple[Any, ...]]:
        """Interpolate the value placeholder first, then the field; the bound
        parameters are concatenated in that same order."""
        field, field_params = compiler.compile(self.lhs)
        value, value_params = compiler.compile(self.rhs)

        sql = self.sql_expression % (value, field)
        params = tuple(value_params) + tuple(field_params)

        return sql, params
Esempio n. 16
0
class SetLength(Transform):
    """Transform returning the number of comma-separated elements."""

    lookup_name = 'len'
    output_field = IntegerField()

    # No str.count equivalent in MySQL :(
    expr = collapse_spaces("""
        (
            CHAR_LENGTH(%s) -
            CHAR_LENGTH(REPLACE(%s, ',', '')) +
            IF(CHAR_LENGTH(%s), 1, 0)
        )
    """)

    def as_sql(self, compiler, connection):
        """Count commas (length minus comma-stripped length) plus one when
        the field is non-empty."""
        compiled, params = compiler.compile(self.lhs)
        sql = self.expr % (compiled, compiled, compiled)
        return sql, params
Esempio n. 17
0
class Command(BaseCommand):
    args = "<app_name>"

    help = collapse_spaces("""
        Outputs a migration that will create a table.
    """)

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        parser.add_argument(
            "aliases",
            metavar="aliases",
            nargs="*",
            help="Specify the cache alias(es) to create migrations for.",
        )

    def handle(self, *args: Any, aliases: list[str], **options: Any) -> None:
        """Write out a migration covering every MySQLCache table."""
        if not aliases:
            aliases = list(settings.CACHES)

        tables = set()
        for alias in aliases:
            try:
                cache = caches[alias]
            except InvalidCacheBackendError:
                raise CommandError(f"Cache '{alias}' does not exist")

            if not isinstance(cache, MySQLCache):  # pragma: no cover
                continue

            tables.add(cache._table)

        if not tables:
            self.stderr.write("No MySQLCache instances in CACHES")
            return

        self.stdout.write(self.render_migration(tables))

    def render_migration(self, tables: set[str]) -> str:
        # A Django template would be natural here, but the user may not have
        # the django template engine configured in TEMPLATES, so assemble the
        # text by hand.
        pieces = [header]
        pieces.extend(
            table_operation.replace("{{ table }}", table) for table in tables
        )
        pieces.append(footer)
        return "".join(pieces)
Esempio n. 18
0
class SetLength(Transform):
    """Transform counting the comma-separated elements of a set field."""

    lookup_name = "len"
    output_field = IntegerField()

    # No str.count equivalent in MySQL :(
    expr = collapse_spaces("""
        (
            CHAR_LENGTH(%s) -
            CHAR_LENGTH(REPLACE(%s, ',', '')) +
            IF(CHAR_LENGTH(%s), 1, 0)
        )
    """)

    def as_sql(self, compiler: SQLCompiler,
               connection: BaseDatabaseWrapper) -> tuple[str, Iterable[Any]]:
        """Count commas plus one when the field is non-empty."""
        compiled, params = compiler.compile(self.lhs)
        return self.expr % (compiled, compiled, compiled), params
Esempio n. 19
0
    def _check_default(self):
        """System check: flag a shared mutable object used as the default."""
        if not isinstance(self.default, (list, dict)):
            return []
        hint = collapse_spaces("""
                        Mutable defaults get shared between all instances of
                        the field, which probably isn't what you want. You
                        should replace your default with a callable, e.g.
                        replace default={{}} with default=dict.

                        The default you passed was '{}'.
                    """.format(self.default))
        return [
            checks.Error(
                "Do not use mutable defaults for JSONField",
                hint=hint,
                obj=self,
                id="django_mysql.E017",
            ),
        ]
Esempio n. 20
0
class Command(BaseCommand):
    args = "<optional cache aliases>"

    help = collapse_spaces("""
        Runs cache.cull() on all your MySQLCache caches, or only those
        specified aliases.
    """)

    if django.VERSION[:2] >= (1, 10):

        def add_arguments(self, parser):
            parser.add_argument(
                'aliases', metavar='aliases', nargs='*',
                help='Specify the cache alias(es) to cull.',
            )

    def handle(self, *args, **options):
        """Cull expired rows from each selected MySQLCache backend."""
        verbosity = options.get('verbosity')

        # Django >= 1.10 parses positionals into options; older versions
        # hand them to us through *args.
        if django.VERSION[:2] >= (1, 10):
            aliases = set(options['aliases'])
        else:
            aliases = set(args)

        if not aliases:
            aliases = settings.CACHES

        for alias in aliases:
            try:
                cache = caches[alias]
            except InvalidCacheBackendError:
                raise CommandError("Cache '{}' does not exist".format(alias))

            if not isinstance(cache, MySQLCache):  # pragma: no cover
                continue

            if verbosity >= 1:
                self.stdout.write(
                    "Deleting from cache '{}'... ".format(alias),
                    ending='',
                )
            culled = cache.cull()
            if verbosity >= 1:
                self.stdout.write("{} entries deleted.".format(culled))
Esempio n. 21
0
    def _check_default(self):
        """System check: flag a shared mutable object used as the default."""
        if not isinstance(self.default, (list, dict)):
            return []
        hint = collapse_spaces('''
                        Mutable defaults get shared between all instances of
                        the field, which probably isn't what you want. You
                        should replace your default with a callable, e.g.
                        replace default={{}} with default=dict.

                        The default you passed was '{}'.
                    '''.format(self.default))
        return [
            checks.Error(
                'Do not use mutable defaults for JSONField',
                hint=hint,
                obj=self,
                id='django_mysql.E017',
            ),
        ]
class Command(BaseCommand):
    args = "<app_name>"

    help = collapse_spaces("""
        Outputs a migration that will create a table.
    """)

    def handle(self, *aliases, **options):
        """Print a migration creating the table for each MySQLCache alias."""
        names = set(aliases) if aliases else settings.CACHES

        tables = set()
        for alias in names:
            try:
                cache = caches[alias]
            except InvalidCacheBackendError:
                raise CommandError("Cache '{}' does not exist".format(alias))

            if not isinstance(cache, MySQLCache):  # pragma: no cover
                continue

            tables.add(cache._table)

        if not tables:
            self.stderr.write("No MySQLCache instances in CACHES")
            return

        self.stdout.write(self.render_migration(tables))

    def render_migration(self, tables):
        # A Django template would be natural here, but the user may not have
        # the django template engine configured in TEMPLATES, so assemble the
        # text by hand.
        parts = [header]
        parts.extend(
            table_operation.replace('{{ table }}', table) for table in tables
        )
        parts.append(footer)
        return ''.join(parts)
Esempio n. 23
0
class Command(BaseCommand):
    args = "<optional cache aliases>"

    help = collapse_spaces(
        """
        Runs cache.cull() on all your MySQLCache caches, or only those
        specified aliases.
    """
    )

    def add_arguments(self, parser):
        parser.add_argument(
            "aliases",
            metavar="aliases",
            nargs="*",
            help="Specify the cache alias(es) to cull.",
        )

    def handle(self, *args, **options):
        """Cull expired rows from each selected MySQLCache backend."""
        verbosity = options.get("verbosity")

        aliases = set(options["aliases"])
        if not aliases:
            aliases = settings.CACHES

        for alias in aliases:
            try:
                cache = caches[alias]
            except InvalidCacheBackendError:
                raise CommandError(f"Cache '{alias}' does not exist")

            if not isinstance(cache, MySQLCache):  # pragma: no cover
                continue

            if verbosity >= 1:
                self.stdout.write(f"Deleting from cache '{alias}'... ", ending="")
            culled = cache.cull()
            if verbosity >= 1:
                self.stdout.write(f"{culled} entries deleted.")
Esempio n. 24
0
class MySQLCache(BaseDatabaseCache):
    """Django cache backend backed by a MySQL table.

    Values are pickled blobs (optionally zlib-compressed), except ints in
    BIGINT range, which are stored natively so incr()/decr() can do their
    arithmetic in SQL (see encode()/decode()/_base_delta()).
    """

    # Got an error with the add() query using BIGINT_UNSIGNED_MAX, so use a
    # value slightly 1 bit less (still an incalculable time into the future of
    # 1970)
    FOREVER_TIMEOUT = BIGINT_UNSIGNED_MAX >> 1

    # Backing-table schema. cache_key uses a binary collation so lookups are
    # case-sensitive; value_type is the one-char type code written by encode().
    create_table_sql = dedent('''\
        CREATE TABLE `{table_name}` (
            cache_key varchar(255) CHARACTER SET utf8 COLLATE utf8_bin
                                   NOT NULL PRIMARY KEY,
            value longblob NOT NULL,
            value_type char(1) CHARACTER SET latin1 COLLATE latin1_bin
                               NOT NULL DEFAULT 'p',
            expires BIGINT UNSIGNED NOT NULL
        );
    ''')

    @classmethod
    def _now(cls):
        # The expires column stores milliseconds since the unix epoch (UTC).
        return int(1000 * time())

    def __init__(self, table, params):
        # table: backing table name; params: the CACHES alias configuration.
        super(MySQLCache, self).__init__(table, params)
        options = params.get('OPTIONS', {})
        # Pickled values at least this many bytes long get zlib-compressed.
        self._compress_min_length = options.get('COMPRESS_MIN_LENGTH', 5000)
        self._compress_level = options.get('COMPRESS_LEVEL', 6)
        # Probability that any one write triggers a cull() (see _maybe_cull).
        self._cull_probability = options.get('CULL_PROBABILITY', 0.01)

        # Figure out our *reverse* key function
        if self.key_func is default_key_func:
            self.reverse_key_func = default_reverse_key_func
            if ':' in self.key_prefix:
                raise ValueError(
                    "Cannot use the default KEY_FUNCTION and "
                    "REVERSE_KEY_FUNCTION if you have a colon in your "
                    "KEY_PREFIX.")
        else:
            # A custom key function needs a matching custom reverse function;
            # it may be None, in which case _with_prefix methods will refuse
            # to run (see keys_with_prefix).
            reverse_key_func = params.get('REVERSE_KEY_FUNCTION', None)
            self.reverse_key_func = get_reverse_key_func(reverse_key_func)

    # Django API + helpers

    def get(self, key, default=None, version=None):
        """Fetch and decode one key; return ``default`` if absent/expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            # Expiry is filtered in SQL by comparing against the current time.
            cursor.execute(self._get_query.format(table=table),
                           (key, self._now()))
            row = cursor.fetchone()

        if row is None:
            return default
        else:
            value, value_type = row
            return self.decode(value, value_type)

    _get_query = collapse_spaces("""
        SELECT value, value_type
        FROM {table}
        WHERE cache_key = %s AND
              expires >= %s
    """)

    def get_many(self, keys, version=None):
        """Fetch several keys in one query; missing/expired keys omitted."""
        # Map made (prefixed/versioned) keys back to the caller's keys.
        made_key_to_key = {
            self.make_key(key, version=version): key
            for key in keys
        }
        made_keys = list(made_key_to_key.keys())
        for key in made_keys:
            self.validate_key(key)

        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            # NOTE(review): passing the list as a single param for the IN
            # clause — presumably the MySQL driver expands it; verify.
            cursor.execute(self._get_many_query.format(table=table),
                           (made_keys, self._now()))
            rows = cursor.fetchall()

        data = {}

        for made_key, value, value_type in rows:
            key = made_key_to_key[made_key]
            data[key] = self.decode(value, value_type)

        return data

    _get_many_query = collapse_spaces("""
        SELECT cache_key, value, value_type
        FROM {table}
        WHERE cache_key IN %s AND
              expires >= %s
    """)

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Unconditionally store ``value`` under ``key``."""
        made = self.make_key(key, version=version)
        self.validate_key(made)
        self._base_set('set', made, value, timeout)

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Store only if the key is absent or expired; return success flag."""
        made = self.make_key(key, version=version)
        self.validate_key(made)
        return self._base_set('add', made, value, timeout)

    def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):
        # Shared implementation behind set() (mode='set') and add()
        # (mode='add').
        exp = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        # Possibly clear out expired rows before writing new ones.
        self._maybe_cull()
        with connections[db].cursor() as cursor:

            value, value_type = self.encode(value)

            if mode == 'set':
                query = self._set_query
                params = (key, value, value_type, exp)
            elif mode == 'add':
                # add additionally binds the current time so the query can
                # tell whether an existing row is expired (see _add_query).
                query = self._add_query
                params = (key, value, value_type, exp, self._now())

            cursor.execute(query.format(table=table), params)

            if mode == 'set':
                return True
            elif mode == 'add':
                # Use a special code in the add query for "did insert"
                insert_id = cursor.lastrowid
                return (insert_id != 444)

    # Upsert template: insert rows, or overwrite existing ones
    # unconditionally. {{VALUES_CLAUSE}} is substituted with one
    # "(%s, %s, %s, %s)" group per row (see set_many / _set_query).
    _set_many_query = collapse_spaces("""
        INSERT INTO {table} (cache_key, value, value_type, expires)
        VALUES {{VALUES_CLAUSE}}
        ON DUPLICATE KEY UPDATE
            value=VALUES(value),
            value_type=VALUES(value_type),
            expires=VALUES(expires)
    """)

    # set() is just set_many() with a single row's placeholders.
    _set_query = _set_many_query.replace('{{VALUES_CLAUSE}}',
                                         '(%s, %s, %s, %s)')

    # Uses the IFNULL / LEAST / LAST_INSERT_ID trick to communicate the special
    # value of 444 back to the client (LAST_INSERT_ID is otherwise 0, since
    # there is no AUTO_INCREMENT column)
    _add_query = collapse_spaces("""
        INSERT INTO {table} (cache_key, value, value_type, expires)
        VALUES (%s, %s, %s, %s)
        ON DUPLICATE KEY UPDATE
            value=IF(expires > @tmp_now:=%s, value, VALUES(value)),
            value_type=IF(expires > @tmp_now, value_type, VALUES(value_type)),
            expires=IF(
                expires > @tmp_now,
                IFNULL(
                    LEAST(LAST_INSERT_ID(444), NULL),
                    expires
                ),
                VALUES(expires)
            )
    """)

    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        """Store a dict of key -> value in a single upsert query."""
        exp = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        self._maybe_cull()

        # Flatten (key, value, value_type, expires) per row into one params
        # list matching the repeated VALUES groups below.
        params = []
        for key, value in six.iteritems(data):
            made_key = self.make_key(key, version=version)
            self.validate_key(made_key)
            value, value_type = self.encode(value)
            params.extend((made_key, value, value_type, exp))

        query = self._set_many_query.replace(
            '{{VALUES_CLAUSE}}',
            ','.join('(%s, %s, %s, %s)' for key in data)).format(table=table)

        with connections[db].cursor() as cursor:
            cursor.execute(query, params)

    def delete(self, key, version=None):
        """Delete a single key; deleting a missing key is a no-op."""
        key = self.make_key(key, version=version)
        self.validate_key(key)

        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            cursor.execute(self._delete_query.format(table=table), (key, ))

    _delete_query = collapse_spaces("""
        DELETE FROM {table}
        WHERE cache_key = %s
    """)

    def delete_many(self, keys, version=None):
        """Delete several keys in one query."""
        made_keys = [self.make_key(key, version=version) for key in keys]
        for key in made_keys:
            self.validate_key(key)

        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            # NOTE(review): list passed as one param for the IN clause —
            # presumably the MySQL driver expands it; verify.
            cursor.execute(self._delete_many_query.format(table=table),
                           (made_keys, ))

    _delete_many_query = collapse_spaces("""
        DELETE FROM {table}
        WHERE cache_key IN %s
    """)

    def has_key(self, key, version=None):
        """Return True if the key exists and has not expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)

        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            # Existence check only — no need to fetch/decode the value.
            cursor.execute(self._has_key_query.format(table=table),
                           (key, self._now()))
            return cursor.fetchone() is not None

    _has_key_query = collapse_spaces("""
        SELECT 1 FROM {table}
        WHERE cache_key = %s and expires > %s
    """)

    def incr(self, key, delta=1, version=None):
        """Add ``delta`` to an integer-typed key, computed in SQL."""
        return self._base_delta(key, delta, version, '+')

    def decr(self, key, delta=1, version=None):
        """Subtract ``delta`` from an integer-typed key, computed in SQL."""
        return self._base_delta(key, delta, version, '-')

    def _base_delta(self, key, delta, version, operation):
        # Shared implementation behind incr ('+') and decr ('-'). Raises
        # ValueError when the key is missing or was not stored as an int.
        key = self.make_key(key, version=version)
        self.validate_key(key)

        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            updated = cursor.execute(
                self._delta_query.format(table=table, operation=operation),
                (delta, key))

            if not updated:
                raise ValueError("Key '%s' not found, or not an integer" % key)

            # New value stored in insert_id
            return cursor.lastrowid

    # Looks a bit tangled to turn the blob back into an int for updating, but
    # it works. Stores the new value for insert_id() with LAST_INSERT_ID
    _delta_query = collapse_spaces("""
        UPDATE {table}
        SET value = LAST_INSERT_ID(
            CAST(value AS SIGNED INTEGER)
            {operation}
            %s
        )
        WHERE cache_key = %s AND
              value_type = 'i'
    """)

    def clear(self):
        """Remove every row from the cache table."""
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        sql = "DELETE FROM {table}".format(table=table)
        with connections[db].cursor() as cursor:
            cursor.execute(sql)

    def validate_key(self, key):
        """
        Django normally warns about maximum key length, but we error on it.
        """
        if len(key) > 250:
            # Fixed typo in the error message: "maxmimum" -> "maximum".
            raise ValueError(
                "Cache key is longer than the maximum 250 characters: {}".
                format(key))
        return super(MySQLCache, self).validate_key(key)

    def encode(self, obj):
        """
        Take a Python object and return it as a tuple (value, value_type), a
        blob and a one-char code for what type it is
        """
        # Ints within BIGINT range are stored natively ('i') so that
        # incr()/decr() can do their arithmetic in SQL.
        if self._is_valid_mysql_bigint(obj):
            return obj, 'i'

        value = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        value_type = 'p'
        # Compress large pickles ('z') when compression is enabled.
        if (self._compress_min_length
                and len(value) >= self._compress_min_length):
            value = zlib.compress(value, self._compress_level)
            value_type = 'z'
        return value, value_type

    def _is_valid_mysql_bigint(self, value):
        # True when ``value`` may be stored natively in the BIGINT column.
        return (
            # Exact type check, not isinstance: int/long subclasses are
            # expected to decode back to the same object, so they must be
            # pickled instead (this also excludes bool, an int subclass).
            (type(value) in six.integer_types) and
            # Can't go beyond these ranges
            BIGINT_SIGNED_MIN <= value <= BIGINT_SIGNED_MAX)

    def decode(self, value, value_type):
        """
        Take a value blob and its value_type one-char code and convert it back
        to a python object
        """
        # 'i': natively-stored integer.
        if value_type == 'i':
            return int(value)

        # 'z': zlib-compressed pickle — decompress, then fall through to 'p'.
        if value_type == 'z':
            value = zlib.decompress(value)
            value_type = 'p'

        # 'p': plain pickle.
        if value_type == 'p':
            return pickle.loads(force_bytes(value))

        raise ValueError(
            "Unknown value_type '{}' read from the cache table.".format(
                value_type))

    def _maybe_cull(self):
        # Roll the dice, if it says yes then cull
        if self._cull_probability and random() <= self._cull_probability:
            self.cull()

    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
        """
        Convert a Django timeout (seconds, or None for "forever") into the
        millisecond value stored in the expires column.
        """
        if timeout is None:
            return self.FOREVER_TIMEOUT
        expiry = super(MySQLCache, self).get_backend_timeout(timeout)
        return int(expiry * 1000)

    # Our API extensions

    def keys_with_prefix(self, prefix, version=None):
        """
        Return the set of keys (for the given version) whose key part starts
        with *prefix* and that have not expired.
        """
        if self.reverse_key_func is None:
            raise ValueError(
                "To use the _with_prefix commands with a custom KEY_FUNCTION, "
                "you need to specify a custom REVERSE_KEY_FUNCTION too.")

        if version is None:
            version = self.version

        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        like_pattern = self.make_key(prefix + '%', version=version)

        with connections[db].cursor() as cursor:
            cursor.execute(
                """SELECT cache_key FROM {table}
                   WHERE cache_key LIKE %s AND
                         expires >= %s""".format(table=table),
                (like_pattern, self._now()))
            matched = set()
            for (full_key,) in cursor.fetchall():
                key, key_prefix, key_version = self.reverse_key_func(full_key)
                if key_version == version:
                    matched.add(key)
            return matched

    def get_with_prefix(self, prefix, version=None):
        """
        Return a dict {key: decoded value} for all unexpired entries whose
        key part starts with *prefix*, for the given version.
        """
        if self.reverse_key_func is None:
            raise ValueError(
                "To use the _with_prefix commands with a custom KEY_FUNCTION, "
                "you need to specify a custom REVERSE_KEY_FUNCTION too.")

        if version is None:
            version = self.version

        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        prefix = self.make_key(prefix + '%', version=version)
        # (Removed a dead `version = six.text_type(version)` assignment -
        # the variable was never read afterwards.)

        with connections[db].cursor() as cursor:
            cursor.execute(
                """SELECT cache_key, value, value_type
                   FROM {table}
                   WHERE cache_key LIKE %s AND
                         expires >= %s""".format(table=table),
                (prefix, self._now()))
            rows = cursor.fetchall()

            data = {}
            for made_key, value, value_type in rows:
                key, key_prefix, key_version = self.reverse_key_func(made_key)
                data[key] = self.decode(value, value_type)

            return data

    def delete_with_prefix(self, prefix, version=None):
        """
        Delete all entries whose key part starts with *prefix* for the given
        version. Returns whatever cursor.execute() reports for the DELETE.
        """
        if version is None:
            version = self.version

        db_alias = router.db_for_write(self.cache_model_class)
        table = connections[db_alias].ops.quote_name(self._table)
        like_pattern = self.make_key(prefix + '%', version=version)

        with connections[db_alias].cursor() as cursor:
            return cursor.execute(
                """DELETE FROM {table}
                   WHERE cache_key LIKE %s""".format(table=table),
                (like_pattern, ))

    def cull(self):
        """
        Delete expired rows; then, if the table still holds at least
        _max_entries rows, delete a further fraction (1 / _cull_frequency)
        of all rows (everything when _cull_frequency is 0).

        Returns the number of rows deleted.
        """
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            # First, try just deleting expired keys
            num_deleted = cursor.execute(
                "DELETE FROM {table} WHERE expires < %s".format(table=table),
                (self._now(), ))

            # -1 means "Don't limit size"
            if self._max_entries == -1:
                # Bug fix: previously a bare `return` (None) here, which was
                # inconsistent with every other path returning a count.
                return num_deleted

            cursor.execute("SELECT COUNT(*) FROM {table}".format(table=table))
            num = cursor.fetchone()[0]

            if num < self._max_entries:
                return num_deleted

            # Now do a key-based cull
            if self._cull_frequency == 0:
                num_deleted += cursor.execute(
                    "DELETE FROM {table}".format(table=table))
            else:
                cull_num = num // self._cull_frequency
                # Find the key at the cut-off position...
                cursor.execute(
                    """SELECT cache_key FROM {table}
                       ORDER BY cache_key
                       LIMIT 1 OFFSET %s""".format(table=table), (cull_num, ))
                max_key = cursor.fetchone()[0]
                # ...and delete everything ordered before it
                num_deleted += cursor.execute(
                    """DELETE FROM {table}
                       WHERE cache_key < %s""".format(table=table),
                    (max_key, ))
            return num_deleted
# Esempio n. 25
class Command(BaseCommand):
    """
    Management command that finds DateTimeField columns created with MySQL
    column type 'datetime' (zero fractional-second precision) instead of
    'datetime(6)', and prints the ALTER TABLE statements that would fix them.
    """

    args = "<optional connection alias>"

    help = collapse_spaces("""
        Detects DateTimeFields with column type 'datetime' instead of
        'datetime(6)' and outputs the SQL to fix them.
    """)

    def add_arguments(self, parser):
        parser.add_argument(
            'alias',
            metavar='alias',
            nargs='?',
            default=DEFAULT_DB_ALIAS,
            help='Specify the database connection alias to output '
            'parameters for.',
        )

    def handle(self, *args, **options):
        alias = options['alias']

        try:
            connection = connections[alias]
        except ConnectionDoesNotExist:
            raise CommandError("Connection '{}' does not exist".format(alias))

        if connection.vendor != 'mysql':
            raise CommandError(
                "{} is not a MySQL database connection".format(alias))

        # Collect all the fix statements first, then output them together
        sqls = []
        with connection.cursor() as cursor:
            for table_name in self.all_table_names():
                sql = self.datetime_fix_sql(connection, cursor, table_name)
                if sql:
                    sqls.append(sql)

        for sql in sqls:
            self.stdout.write(sql)

    def all_table_names(self):
        """Return the sorted db_table names of all installed models."""
        table_names = {
            model._meta.db_table
            for app_config in apps.get_app_configs()
            for model in app_config.get_models()
        }
        # Idiom fix: was `list(sorted(...))` - sorted() already returns a list
        return sorted(table_names)

    def datetime_fix_sql(self, connection, cursor, table_name):
        """
        Return an ALTER TABLE statement fixing any zero-precision datetime
        columns on *table_name*, or None when the table needs no fix.
        """
        cursor.execute(
            """
            SELECT COLUMN_NAME
            FROM INFORMATION_SCHEMA.COLUMNS
            WHERE TABLE_SCHEMA = DATABASE() AND
                  TABLE_NAME = %s AND
                  DATA_TYPE = 'datetime' AND
                  DATETIME_PRECISION = 0
            ORDER BY COLUMN_NAME
            """,
            (table_name, ),
        )
        bad_column_names = [r[0] for r in cursor.fetchall()]
        if not bad_column_names:
            return None

        qn = connection.ops.quote_name

        cursor.execute("SHOW CREATE TABLE {}".format(qn(table_name)))
        create_table = cursor.fetchone()[1]
        column_specs = parse_create_table(create_table)

        modify_columns = []
        for column_name in bad_column_names:
            column_spec = column_specs[column_name]
            # Replace only the first 'datetime' - the column type at the
            # start of the spec
            new_column_spec = column_spec.replace('datetime', 'datetime(6)', 1)
            modify_columns.append(
                'MODIFY COLUMN {} {}'.format(qn(column_name),
                                             new_column_spec))

        return 'ALTER TABLE {table_name}\n    {columns};'.format(
            table_name=qn(table_name),
            columns=',\n    '.join(modify_columns),
        )
# Esempio n. 26
class MySQLCache(BaseDatabaseCache):
    """
    Django cache backend storing entries in a MySQL table.

    Each row holds a cache_key, a value blob, a one-char value_type code
    ('i' int, 'p' pickle, 'z' zlib-compressed pickle - see encode()/decode()),
    and an expiry time in milliseconds since the unix epoch.
    """

    # Got an error with the add() query using BIGINT_UNSIGNED_MAX, so use a
    # value slightly 1 bit less (still an incalculable time into the future of
    # 1970)
    FOREVER_TIMEOUT = BIGINT_UNSIGNED_MAX >> 1

    # DDL for the backing table; the {table_name} placeholder is formatted in
    # by whatever creates the table.
    create_table_sql = dedent(
        """\
        CREATE TABLE `{table_name}` (
            cache_key varchar(255) CHARACTER SET utf8 COLLATE utf8_bin
                                   NOT NULL PRIMARY KEY,
            value longblob NOT NULL,
            value_type char(1) CHARACTER SET latin1 COLLATE latin1_bin
                               NOT NULL DEFAULT 'p',
            expires BIGINT UNSIGNED NOT NULL
        );
    """
    )

    @classmethod
    def _now(cls) -> int:
        """Return the current time in milliseconds since the unix epoch."""
        # Values in the expires column are milliseconds since unix epoch (UTC)
        return int(time() * 1000)

    # Maps a full cache_key back to (key, prefix, version); None when no
    # reverse function is available (custom KEY_FUNCTION without a
    # REVERSE_KEY_FUNCTION - see __init__)
    reverse_key_func: Callable[[str], tuple[str, str, int]] | None

    def __init__(self, table: str, params: dict[str, Any]) -> None:
        """
        Read backend OPTIONS (COMPRESS_MIN_LENGTH, COMPRESS_LEVEL,
        CULL_PROBABILITY) and resolve the reverse key function.
        """
        super().__init__(table, params)
        options = params.get("OPTIONS", {})
        self._compress_min_length = options.get("COMPRESS_MIN_LENGTH", 5000)
        self._compress_level = options.get("COMPRESS_LEVEL", 6)
        self._cull_probability = options.get("CULL_PROBABILITY", 0.01)

        # Figure out our *reverse* key function
        if self.key_func is not default_key_func:
            func_name = params.get("REVERSE_KEY_FUNCTION", None)
            self.reverse_key_func = get_reverse_key_func(func_name)
        else:
            self.reverse_key_func = default_reverse_key_func
            if ":" in self.key_prefix:
                raise ValueError(
                    "Cannot use the default KEY_FUNCTION and "
                    "REVERSE_KEY_FUNCTION if you have a colon in your "
                    "KEY_PREFIX."
                )

    # Django API + helpers

    def get(
        self, key: str, default: Any | None = None, version: int | None = None
    ) -> Any:
        """Fetch the value for *key*, or *default* when missing or expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            cursor.execute(self._get_query.format(table=table), (key, self._now()))
            row = cursor.fetchone()

        if row is None:
            return default
        value, value_type = row
        return self.decode(value, value_type)

    _get_query = collapse_spaces(
        """
        SELECT value, value_type
        FROM {table}
        WHERE cache_key = %s AND
              expires >= %s
    """
    )

    def get_many(
        self, keys: Iterable[str], version: int | None = None
    ) -> dict[str, Any]:
        """Fetch several keys at once, returning {key: value} for hits only."""
        made_key_to_key = {self.make_key(key, version=version): key for key in keys}
        made_keys = list(made_key_to_key)
        for made_key in made_keys:
            self.validate_key(made_key)

        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            cursor.execute(
                self._get_many_query.format(
                    table=table, list_sql=get_list_sql(made_keys)
                ),
                made_keys + [self._now()],
            )
            rows = cursor.fetchall()

        return {
            made_key_to_key[made_key]: self.decode(value, value_type)
            for made_key, value, value_type in rows
        }

    _get_many_query = collapse_spaces(
        """
        SELECT cache_key, value, value_type
        FROM {table}
        WHERE cache_key IN {list_sql} AND
              expires >= %s
    """
    )

    def set(
        self,
        key: str,
        value: Any,
        timeout: Any = DEFAULT_TIMEOUT,
        version: int | None = None,
    ) -> None:
        """Unconditionally store *value* under *key*."""
        made_key = self.make_key(key, version=version)
        self.validate_key(made_key)
        self._base_set("set", made_key, value, timeout)

    def add(
        self,
        key: str,
        value: Any,
        timeout: Any = DEFAULT_TIMEOUT,
        version: int | None = None,
    ) -> bool:
        """Store *value* only when *key* has no live entry; True if stored."""
        made_key = self.make_key(key, version=version)
        self.validate_key(made_key)
        return self._base_set("add", made_key, value, timeout)

    def _base_set(
        self, mode: str, key: str, value: Any, timeout: Any = DEFAULT_TIMEOUT
    ) -> bool:
        """
        Shared implementation for set() and add(). Returns True when the
        value was written (always, for 'set').
        """
        if mode not in ("set", "add"):
            raise ValueError("'mode' should be 'set' or 'add'")

        exp = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        self._maybe_cull()

        encoded, value_type = self.encode(value)
        params: tuple[Any, ...]
        if mode == "set":
            query = self._set_query
            params = (key, encoded, value_type, exp)
        else:  # mode == 'add'
            query = self._add_query
            params = (key, encoded, value_type, exp, self._now())

        with connections[db].cursor() as cursor:
            cursor.execute(query.format(table=table), params)

            if mode == "set":
                return True
            # 'add': the query signals "there was already a live entry, no
            # insert happened" by setting LAST_INSERT_ID to the special 444
            return cursor.lastrowid != 444

    # Upsert used by set() and set_many(): insert rows, overwriting any
    # existing row with the same cache_key.
    _set_many_query = collapse_spaces(
        """
        INSERT INTO {table} (cache_key, value, value_type, expires)
        VALUES {{VALUES_CLAUSE}}
        ON DUPLICATE KEY UPDATE
            value=VALUES(value),
            value_type=VALUES(value_type),
            expires=VALUES(expires)
    """
    )

    # Single-row form of the upsert above
    _set_query = _set_many_query.replace("{{VALUES_CLAUSE}}", "(%s, %s, %s, %s)")

    # Uses the IFNULL / LEAST / LAST_INSERT_ID trick to communicate the special
    # value of 444 back to the client (LAST_INSERT_ID is otherwise 0, since
    # there is no AUTO_INCREMENT column). 444 means "an unexpired value already
    # existed so nothing was overwritten" - _base_set() checks
    # cursor.lastrowid against it. Expired rows are overwritten as if absent.
    _add_query = collapse_spaces(
        """
        INSERT INTO {table} (cache_key, value, value_type, expires)
        VALUES (%s, %s, %s, %s)
        ON DUPLICATE KEY UPDATE
            value=IF(expires > @tmp_now:=%s, value, VALUES(value)),
            value_type=IF(expires > @tmp_now, value_type, VALUES(value_type)),
            expires=IF(
                expires > @tmp_now,
                IFNULL(
                    LEAST(LAST_INSERT_ID(444), NULL),
                    expires
                ),
                VALUES(expires)
            )
    """
    )

    def set_many(
        self,
        data: dict[str, Any],
        timeout: Any = DEFAULT_TIMEOUT,
        version: int | None = None,
    ) -> list[str]:
        """Store every key/value pair in *data*; returns [] (no failed keys)."""
        exp = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        self._maybe_cull()

        params: list[Any] = []
        for key, value in data.items():
            made_key = self.make_key(key, version=version)
            self.validate_key(made_key)
            encoded, value_type = self.encode(value)
            params += [made_key, encoded, value_type, exp]

        # One placeholder group per row, all sent in a single upsert
        values_clause = ",".join(["(%s, %s, %s, %s)"] * len(data))
        query = self._set_many_query.replace(
            "{{VALUES_CLAUSE}}", values_clause
        ).format(table=table)

        with connections[db].cursor() as cursor:
            cursor.execute(query, params)
        return []

    def delete(self, key: str, version: int | None = None) -> None:
        """Remove *key* from the cache (no error when absent)."""
        made_key = self.make_key(key, version=version)
        self.validate_key(made_key)

        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            cursor.execute(self._delete_query.format(table=table), (made_key,))

    _delete_query = collapse_spaces(
        """
        DELETE FROM {table}
        WHERE cache_key = %s
    """
    )

    def delete_many(self, keys: Iterable[str], version: int | None = None) -> None:
        """Remove several keys from the cache in a single statement."""
        made_keys = [self.make_key(key, version=version) for key in keys]
        for made_key in made_keys:
            self.validate_key(made_key)

        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        query = self._delete_many_query.format(
            table=table, list_sql=get_list_sql(made_keys)
        )
        with connections[db].cursor() as cursor:
            cursor.execute(query, made_keys)

    _delete_many_query = collapse_spaces(
        """
        DELETE FROM {table}
        WHERE cache_key IN {list_sql}
    """
    )

    def has_key(self, key: str, version: int | None = None) -> bool:
        """Return True when *key* exists and has not expired."""
        made_key = self.make_key(key, version=version)
        self.validate_key(made_key)

        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            cursor.execute(
                self._has_key_query.format(table=table), (made_key, self._now())
            )
            return cursor.fetchone() is not None

    # NOTE(review): uses `expires > now` where get() uses `expires >= now` -
    # the boundary millisecond differs; confirm whether that is intended.
    _has_key_query = collapse_spaces(
        """
        SELECT 1 FROM {table}
        WHERE cache_key = %s and expires > %s
    """
    )

    def incr(self, key: str, delta: int = 1, version: int | None = None) -> int:
        """Add *delta* to the integer stored at *key*; return the new value."""
        return self._base_delta(key, delta, version, "+")

    def decr(self, key: str, delta: int = 1, version: int | None = None) -> int:
        """Subtract *delta* from the integer at *key*; return the new value."""
        return self._base_delta(key, delta, version, "-")

    def _base_delta(
        self,
        key: str,
        delta: int,
        version: int | None,
        operation: _BaseDeltaType,
    ) -> int:
        """
        Shared implementation of incr()/decr(): apply *operation* ('+'/'-')
        with *delta* to the integer at *key* in a single UPDATE.

        Raises ValueError when the key is missing or not stored as an int.
        """
        made_key = self.make_key(key, version=version)
        self.validate_key(made_key)

        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            query = self._delta_query.format(table=table, operation=operation)
            if not cursor.execute(query, (delta, made_key)):
                raise ValueError("Key '%s' not found, or not an integer" % made_key)

            # The query stashed the new value via LAST_INSERT_ID()
            return cursor.lastrowid

    # SQL template for _base_delta(). Looks a bit tangled to turn the blob
    # back into an int for updating, but it works. LAST_INSERT_ID() stores the
    # new value so the caller can read it back via cursor.lastrowid /
    # insert_id(). {operation} is '+' or '-'; only rows stored as integers
    # (value_type 'i') match, so other keys update zero rows.
    _delta_query = collapse_spaces(
        """
        UPDATE {table}
        SET value = LAST_INSERT_ID(
            CAST(value AS SIGNED INTEGER)
            {operation}
            %s
        )
        WHERE cache_key = %s AND
              value_type = 'i'
    """
    )

    def clear(self) -> None:
        """Remove every row from the cache table."""
        db_alias = router.db_for_write(self.cache_model_class)
        quoted_table = connections[db_alias].ops.quote_name(self._table)
        with connections[db_alias].cursor() as cursor:
            cursor.execute(f"DELETE FROM {quoted_table}")

    def touch(
        self, key: str, timeout: Any = DEFAULT_TIMEOUT, version: int | None = None
    ) -> None:
        """Reset *key*'s expiry to *timeout* without changing its value."""
        made_key = self.make_key(key, version=version)
        self.validate_key(made_key)
        exp = self.get_backend_timeout(timeout)

        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            cursor.execute(
                self._touch_query.format(table=table), [exp, made_key, self._now()]
            )

    _touch_query = collapse_spaces(
        """
        UPDATE {table}
        SET expires = %s
        WHERE cache_key = %s AND
              expires >= %s
    """
    )

    def validate_key(self, key: str) -> None:
        """
        Check that *key* fits in the 250-character limit of the cache_key
        column.

        Django normally warns about maximum key length, but we error on it.
        Raises ValueError for over-long keys, otherwise defers to the base
        class's validation.
        """
        if len(key) > 250:
            # Bug fix: error message previously misspelled "maxmimum"
            raise ValueError(
                f"Cache key is longer than the maximum 250 characters: {key}"
            )
        return super().validate_key(key)

    def encode(self, obj: Any) -> tuple[int | bytes, _EncodedKeyType]:
        """
        Convert a Python object into a (value, value_type) pair: a blob plus
        a one-char code for how it was stored ('i' = int stored directly,
        'p' = pickle, 'z' = zlib-compressed pickle).
        """
        if self._is_valid_mysql_bigint(obj):
            return obj, "i"

        blob = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        threshold = self._compress_min_length
        if threshold and len(blob) >= threshold:
            return zlib.compress(blob, self._compress_level), "z"
        return blob, "p"

    def _is_valid_mysql_bigint(self, value: int | bytes) -> bool:
        """
        Return True if *value* is a plain int within MySQL's signed BIGINT
        range, so it can be stored directly as value_type 'i' rather than
        pickled.
        """
        return (
            # Can't support int subclasses since they are expected to decode
            # back to the same object
            type(value) is int
            # Can't go beyond these ranges
            and BIGINT_SIGNED_MIN <= value <= BIGINT_SIGNED_MAX
        )

    def decode(self, value: bytes, value_type: _EncodedKeyType) -> Any:
        """
        Take a value blob and its value_type one-char code and convert it
        back to a python object (the inverse of encode()).
        """
        if value_type == "i":
            return int(value)

        if value_type == "z":
            return pickle.loads(zlib.decompress(value))

        if value_type == "p":
            return pickle.loads(force_bytes(value))

        raise ValueError(
            f"Unknown value_type '{value_type}' read from the cache table."
        )

    def _maybe_cull(self) -> None:
        # Roll the dice, if it says yes then cull
        if self._cull_probability and random() <= self._cull_probability:
            self.cull()

    def get_backend_timeout(self, timeout: Any = DEFAULT_TIMEOUT) -> int:
        """
        Convert a Django timeout (seconds, or None for "forever") into the
        millisecond value stored in the expires column.
        """
        if timeout is None:
            return self.FOREVER_TIMEOUT
        return int(super().get_backend_timeout(timeout) * 1000)

    # Our API extensions

    def keys_with_prefix(
        self, prefix: str, version: int | None = None
    ) -> builtins.set[str]:
        """
        Return the set of unexpired keys whose key part starts with *prefix*,
        for the given version.
        """
        if self.reverse_key_func is None:
            raise ValueError(
                "To use the _with_prefix commands with a custom KEY_FUNCTION, "
                "you need to specify a custom REVERSE_KEY_FUNCTION too."
            )

        if version is None:
            version = self.version

        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        like_pattern = self.make_key(prefix + "%", version=version)

        with connections[db].cursor() as cursor:
            cursor.execute(
                """SELECT cache_key FROM {table}
                   WHERE cache_key LIKE %s AND
                         expires >= %s""".format(
                    table=table
                ),
                (like_pattern, self._now()),
            )
            # reverse_key_func returns (key, prefix, version); keep the key
            return {
                self.reverse_key_func(row[0])[0] for row in cursor.fetchall()
            }

    def get_with_prefix(
        self, prefix: str, version: int | None = None
    ) -> dict[str, Any]:
        """
        Return a dict {key: decoded value} for all unexpired entries whose
        key part starts with *prefix*, for the given version.
        """
        if self.reverse_key_func is None:
            raise ValueError(
                "To use the _with_prefix commands with a custom KEY_FUNCTION, "
                "you need to specify a custom REVERSE_KEY_FUNCTION too."
            )

        if version is None:
            version = self.version

        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        like_pattern = self.make_key(prefix + "%", version=version)

        with connections[db].cursor() as cursor:
            cursor.execute(
                """SELECT cache_key, value, value_type
                   FROM {table}
                   WHERE cache_key LIKE %s AND
                         expires >= %s""".format(
                    table=table
                ),
                (like_pattern, self._now()),
            )
            # reverse_key_func returns (key, prefix, version); keep the key
            return {
                self.reverse_key_func(made_key)[0]: self.decode(value, value_type)
                for made_key, value, value_type in cursor.fetchall()
            }

    def delete_with_prefix(self, prefix: str, version: int | None = None) -> int:
        """
        Delete all entries whose key part starts with *prefix* for the given
        version. Returns whatever cursor.execute() reports for the DELETE.
        """
        if version is None:
            version = self.version

        db_alias = router.db_for_write(self.cache_model_class)
        table = connections[db_alias].ops.quote_name(self._table)
        like_pattern = self.make_key(prefix + "%", version=version)

        with connections[db_alias].cursor() as cursor:
            return cursor.execute(
                """DELETE FROM {table}
                   WHERE cache_key LIKE %s""".format(
                    table=table
                ),
                (like_pattern,),
            )

    def cull(self) -> int:
        """
        Delete expired rows; then, if the table still holds at least
        _max_entries rows, delete a further fraction (1 / _cull_frequency)
        of all rows (everything when _cull_frequency is 0).

        Returns the number of rows deleted.
        """
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            # First, try just deleting expired keys
            num_deleted = cursor.execute(
                f"DELETE FROM {table} WHERE expires < %s",
                (self._now(),),
            )

            # -1 means "Don't limit size"
            if self._max_entries == -1:
                # Bug fix: previously returned 0 here, discarding the count
                # of expired rows that were just deleted above.
                return num_deleted

            cursor.execute(f"SELECT COUNT(*) FROM {table}")
            num = cursor.fetchone()[0]

            if num < self._max_entries:
                return num_deleted

            # Now do a key-based cull
            if self._cull_frequency == 0:
                num_deleted += cursor.execute(f"DELETE FROM {table}")
            else:
                cull_num = num // self._cull_frequency
                # Find the key at the cut-off position...
                cursor.execute(
                    """SELECT cache_key FROM {table}
                       ORDER BY cache_key
                       LIMIT 1 OFFSET %s""".format(
                        table=table
                    ),
                    (cull_num,),
                )
                max_key = cursor.fetchone()[0]
                # ...and delete everything ordered before it
                num_deleted += cursor.execute(
                    """DELETE FROM {table}
                       WHERE cache_key < %s""".format(
                        table=table
                    ),
                    (max_key,),
                )
            return num_deleted