Example #1
class RedisRateLimiter(RateLimiter):
    ttl = 60

    def __init__(self, **options):
        if not options:
            # inherit default options from REDIS_OPTIONS
            options = settings.SENTRY_REDIS_OPTIONS
        options.setdefault('hosts', {0: {}})

        self.cluster = Cluster(options['hosts'])

    def validate(self):
        try:
            with self.cluster.all() as client:
                client.ping()
        except Exception as e:
            raise InvalidConfiguration(unicode(e))

    def is_limited(self, project, key, limit):
        key = 'rl:%s:%s:%s' % (
            key, project.id, int(time() / self.ttl)
        )

        with self.cluster.map() as client:
            proj_result = client.incr(key)
            client.expire(key, self.ttl)

        return proj_result.value > limit
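This limiter implements fixed-window rate limiting: the counter key embeds `int(time() / ttl)`, so every increment in the same 60-second window hits the same Redis key, and a fresh window starts from zero automatically. A minimal usage sketch follows; the `hosts` option, the localhost Redis instance, and the stand-in `Project` class are assumptions for illustration, not part of the original example.

# Hypothetical usage sketch -- assumes a Redis server on localhost:6379 and a
# minimal stand-in for the project object consumed by is_limited().
class Project(object):
    id = 1

project = Project()
limiter = RedisRateLimiter(hosts={0: {'host': 'localhost', 'port': 6379}})
limiter.validate()  # raises InvalidConfiguration if Redis cannot be reached

# All calls in the same 60-second window share the counter key
# 'rl:events:1:<window>', so the 101st call in a window exceeds the limit.
for _ in range(101):
    limited = limiter.is_limited(project, key='events', limit=100)
print(limited)  # True once more than 100 increments land in this window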
Example #2
class RedisBuffer(Buffer):
    key_expire = 60 * 60  # 1 hour
    pending_key = 'b:p'

    def __init__(self, **options):
        if not options:
            # inherit default options from REDIS_OPTIONS
            options = settings.SENTRY_REDIS_OPTIONS

        options.setdefault('hosts', {
            0: {},
        })
        self.cluster = Cluster(options['hosts'])

    def validate(self):
        try:
            with self.cluster.all() as client:
                client.ping()
        except Exception as e:
            raise InvalidConfiguration(unicode(e))

    def _coerce_val(self, value):
        if isinstance(value, models.Model):
            value = value.pk
        return smart_str(value)

    def _make_key(self, model, filters):
        """
        Returns a Redis-compatible key for the model given filters.
        """
        return 'b:k:%s:%s' % (
            model._meta,
            md5(smart_str('&'.join('%s=%s' % (k, self._coerce_val(v))
                for k, v in sorted(filters.iteritems())))).hexdigest(),
        )

    def _make_lock_key(self, key):
        return 'l:%s' % (key,)

    def incr(self, model, columns, filters, extra=None):
        """
        Increment the key by doing the following:

        - Insert/update a hashmap based on (model, columns)
            - Perform an incrby on counters
            - Perform a set (last write wins) on extra
        - Add hashmap key to pending flushes
        """
        # TODO(dcramer): longer term we'd rather not have to serialize values
        # here (unless it's to JSON)
        key = self._make_key(model, filters)
        # We can't use conn.map() due to wanting to support multiple pending
        # keys (one per Redis shard)
        conn = self.cluster.get_local_client_for_key(key)

        pipe = conn.pipeline()
        pipe.hsetnx(key, 'm', '%s.%s' % (model.__module__, model.__name__))
        pipe.hsetnx(key, 'f', pickle.dumps(filters))
        for column, amount in columns.iteritems():
            pipe.hincrby(key, 'i+' + column, amount)

        if extra:
            for column, value in extra.iteritems():
                pipe.hset(key, 'e+' + column, pickle.dumps(value))
        pipe.expire(key, self.key_expire)
        pipe.zadd(self.pending_key, time(), key)
        pipe.execute()

    def process_pending(self):
        client = self.cluster.get_routing_client()
        lock_key = self._make_lock_key(self.pending_key)
        # prevent a stampede due to celerybeat + periodic task
        if not client.set(lock_key, '1', nx=True, ex=60):
            return

        try:
            for host_id in self.cluster.hosts.iterkeys():
                conn = self.cluster.get_local_client(host_id)
                keys = conn.zrange(self.pending_key, 0, -1)
                if not keys:
                    continue
                for key in keys:
                    process_incr.apply_async(kwargs={
                        'key': key,
                    })
                pipe = conn.pipeline()
                pipe.zrem(self.pending_key, *keys)
                pipe.execute()
        finally:
            client.delete(lock_key)

    def process(self, key):
        client = self.cluster.get_routing_client()
        lock_key = self._make_lock_key(key)
        # prevent a stampede due to the way we use celery etas + duplicate
        # tasks
        if not client.set(lock_key, '1', nx=True, ex=10):
            return

        with self.cluster.map() as conn:
            values = conn.hgetall(key)
            conn.delete(key)

        if not values.value:
            return

        model = import_string(values.value['m'])
        filters = pickle.loads(values.value['f'])
        incr_values = {}
        extra_values = {}
        for k, v in values.value.iteritems():
            if k.startswith('i+'):
                incr_values[k[2:]] = int(v)
            elif k.startswith('e+'):
                extra_values[k[2:]] = pickle.loads(v)

        super(RedisBuffer, self).process(model, incr_values, filters, extra_values)
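RedisBuffer coalesces many small counter updates into one Redis hash per (model, filters) pair: `incr` records the dotted model path ('m'), the pickled filters ('f'), per-column increments ('i+<col>') and last-write-wins extras ('e+<col>'), then registers the hash key in the pending sorted set 'b:p'. A periodic call to `process_pending` fans each pending key out to a `process_incr` task, and `process` unpacks the hash and hands the aggregated values to the base `Buffer.process`. The sketch below is illustrative only: it assumes a configured Sentry install, and the `Group` model and filter values are stand-ins.

# Minimal sketch, assuming a configured Sentry install; Group and the filter
# values are illustrative.
from sentry.models import Group

buf = RedisBuffer(hosts={0: {'host': 'localhost', 'port': 6379}})

# Queues "times_seen += 1" for the Group row with id=42 instead of issuing an
# UPDATE immediately. Redis now holds a hash 'b:k:<model>:<md5-of-filters>'
# with fields 'm', 'f' and 'i+times_seen', plus an entry in the pending set.
buf.incr(Group, columns={'times_seen': 1}, filters={'id': 42})

# Later, a periodic task drains the pending set and applies the aggregates:
buf.process_pending()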
Example #3
class RedisTSDB(BaseTSDB):
    """
    A time series storage implementation which maps types + normalized epochs
    to hash buckets.

    Since each hash keyspace is an epoch, TTLs are applied to the entire bucket.

    This ends up looking something like the following inside of Redis:

    {
        "TSDBModel:epoch:shard": {
            "Key": Count
        }
    }

    In our case, this translates to:

    {
        "Group:epoch:shard": {
            "GroupID": Count
        }
    }

    - ``vnodes`` controls the shard distribution and should ideally be set to
      the maximum number of physical hosts.
    """
    def __init__(self, hosts=None, prefix='ts:', vnodes=64, **kwargs):
        # inherit default options from REDIS_OPTIONS
        defaults = settings.SENTRY_REDIS_OPTIONS

        if hosts is None:
            hosts = defaults.get('hosts', {0: {}})

        self.cluster = Cluster(hosts)
        self.prefix = prefix
        self.vnodes = vnodes
        super(RedisTSDB, self).__init__(**kwargs)

    def validate(self):
        logger.info('Validating Redis version...')

        try:
            with self.cluster.all() as client:
                results = client.info()
        except Exception as e:
            # Any connection issues should be caught here.
            raise InvalidConfiguration(unicode(e))

        versions = {}
        for id, info in results.value.items():
            host = self.cluster.hosts[id]
            # NOTE: This assumes there is no routing magic going on here, and
            # all requests to this host are being served by the same database.
            key = '{host}:{port}'.format(host=host.host, port=host.port)
            versions[key] = Version(
                map(int, info['redis_version'].split('.', 3)))

        check_versions('Redis (TSDB)', versions, Version((2, 8, 9)),
                       Version((3, 0, 4)))

    def make_key(self, model, epoch, model_key):
        if isinstance(model_key, six.integer_types):
            vnode = model_key % self.vnodes
        else:
            vnode = crc32(model_key) % self.vnodes

        return '{0}{1}:{2}:{3}'.format(self.prefix, model.value, epoch, vnode)

    def get_model_key(self, key):
        # We specialize integers so that a pure int-map can be optimized by
        # Redis, whereas long strings (say tag values) will store in a more
        # efficient hashed format.
        if not isinstance(key, six.integer_types):
            # enforce utf-8 encoding
            if isinstance(key, unicode):
                key = key.encode('utf-8')
            return md5(repr(key)).hexdigest()
        return key

    def incr(self, model, key, timestamp=None, count=1):
        self.incr_multi([(model, key)], timestamp, count)

    def incr_multi(self, items, timestamp=None, count=1):
        """
        Increment project ID=1 and group ID=5:

        >>> incr_multi([(TimeSeriesModel.project, 1), (TimeSeriesModel.group, 5)])
        """
        make_key = self.make_key
        normalize_to_rollup = self.normalize_to_rollup
        if timestamp is None:
            timestamp = timezone.now()

        with self.cluster.map() as client:
            for rollup, max_values in self.rollups:
                norm_rollup = normalize_to_rollup(timestamp, rollup)
                for model, key in items:
                    model_key = self.get_model_key(key)
                    hash_key = make_key(model, norm_rollup, model_key)
                    client.hincrby(hash_key, model_key, count)
                    client.expireat(
                        hash_key,
                        self.calculate_expiry(rollup, max_values, timestamp),
                    )

    def get_range(self, model, keys, start, end, rollup=None):
        """
        To get a range of data for group ID=[1, 2, 3]:

        Start and end are both inclusive.

        >>> now = timezone.now()
        >>> get_range(TimeSeriesModel.group, [1, 2, 3],
        >>>          start=now - timedelta(days=1),
        >>>          end=now)
        """
        normalize_to_epoch = self.normalize_to_epoch
        normalize_to_rollup = self.normalize_to_rollup
        make_key = self.make_key

        if rollup is None:
            rollup = self.get_optimal_rollup(start, end)

        results = []
        timestamp = end
        with self.cluster.map() as client:
            while timestamp >= start:
                real_epoch = normalize_to_epoch(timestamp, rollup)
                norm_epoch = normalize_to_rollup(timestamp, rollup)

                for key in keys:
                    model_key = self.get_model_key(key)
                    hash_key = make_key(model, norm_epoch, model_key)
                    results.append(
                        (real_epoch, key, client.hget(hash_key, model_key)))

                timestamp = timestamp - timedelta(seconds=rollup)

        results_by_key = defaultdict(dict)
        for epoch, key, count in results:
            results_by_key[key][epoch] = int(count.value or 0)

        for key, points in results_by_key.iteritems():
            results_by_key[key] = sorted(points.items())
        return dict(results_by_key)
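`make_key` buckets every counter into one of `vnodes` hashes per (model, rollup epoch): integer keys shard by `key % vnodes`, other keys by `crc32(key) % vnodes`, so a single expiry on the bucket retires an entire epoch at once. A worked example of the resulting key layout; the prefix, model value, and epoch are illustrative.

# Worked example of the sharding in make_key(), assuming prefix='ts:',
# vnodes=64, model.value == 1 and a normalized rollup epoch of 397512.
vnodes = 64
model_key = 1005                  # e.g. a group ID
vnode = model_key % vnodes        # 1005 % 64 == 45
hash_key = 'ts:{0}:{1}:{2}'.format(1, 397512, vnode)
print(hash_key)                   # ts:1:397512:45
# HINCRBY on this hash with field '1005' bumps the counter, and one TTL on
# the bucket expires every counter sharing this model, epoch and shard.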
Example #4
class RedisQuota(Quota):
    ttl = 60

    def __init__(self, **options):
        if not options:
            # inherit default options from REDIS_OPTIONS
            options = settings.SENTRY_REDIS_OPTIONS
        super(RedisQuota, self).__init__(**options)
        options.setdefault('hosts', {0: {}})
        self.cluster = Cluster(options['hosts'])

    def validate(self):
        try:
            with self.cluster.all() as client:
                client.ping()
        except Exception as e:
            raise InvalidConfiguration(unicode(e))

    def is_rate_limited(self, project):
        proj_quota = self.get_project_quota(project)
        if project.team:
            team_quota = self.get_team_quota(project.team)
        else:
            team_quota = 0
        system_quota = self.get_system_quota()

        if not (proj_quota or system_quota or team_quota):
            return NotRateLimited

        sys_result, team_result, proj_result = self._incr_project(project)

        if proj_quota and proj_result > proj_quota:
            return RateLimited(retry_after=self.get_time_remaining())

        if team_quota and team_result > team_quota:
            return RateLimited(retry_after=self.get_time_remaining())

        if system_quota and sys_result > system_quota:
            return RateLimited(retry_after=self.get_time_remaining())

        return NotRateLimited

    def get_time_remaining(self):
        return int(self.ttl - (
            time.time() - int(time.time() / self.ttl) * self.ttl))

    def _get_system_key(self):
        return 'quota:s:%s' % (int(time.time() / self.ttl),)

    def _get_team_key(self, team):
        return 'quota:t:%s:%s' % (team.id, int(time.time() / self.ttl))

    def _get_project_key(self, project):
        return 'quota:p:%s:%s' % (project.id, int(time.time() / self.ttl))

    def _incr_project(self, project):
        if project.team:
            team_key = self._get_team_key(project.team)
        else:
            team_key = None
            team_result = None

        proj_key = self._get_project_key(project)
        sys_key = self._get_system_key()
        with self.cluster.map() as client:
            proj_result = client.incr(proj_key)
            client.expire(proj_key, self.ttl)
            sys_result = client.incr(sys_key)
            client.expire(sys_key, self.ttl)
            if team_key:
                team_result = client.incr(team_key)
                client.expire(team_key, self.ttl)

        return (
            int(sys_result.value),
            int(team_result and team_result.value or 0),
            int(proj_result.value),
        )
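All three counters (system, team, project) are keyed by the current window index `int(time.time() / ttl)`, so a rejected client can simply be told to retry when the window rolls over; `get_time_remaining` computes exactly that. A worked example of the arithmetic with an illustrative clock value:

# Worked example of get_time_remaining() with ttl = 60 seconds and an
# illustrative current time.
ttl = 60
now = 1000000042.7                           # stand-in for time.time()
window_start = int(now / ttl) * ttl          # 1000000020
remaining = int(ttl - (now - window_start))  # int(60 - 22.7) == 37
print(remaining)  # 37 seconds until the quota counters reset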
Example #5
class RedisTSDB(BaseTSDB):
    """
    A time series storage implementation which maps types + normalized epochs
    to hash buckets.

    Since each hash keyspace is an epoch, TTLs are applied to the entire bucket.

    This ends up looking something like the following inside of Redis:

    {
        "TSDBModel:epoch:shard": {
            "Key": Count
        }
    }

    In our case, this translates to:

    {
        "Group:epoch:shard": {
            "GroupID": Count
        }
    }

    - ``vnodes`` controls the shard distribution and should ideally be set to
      the maximum number of physical hosts.
    """
    def __init__(self, hosts=None, prefix='ts:', vnodes=64, **kwargs):
        # inherit default options from REDIS_OPTIONS
        defaults = settings.SENTRY_REDIS_OPTIONS

        if hosts is None:
            hosts = defaults.get('hosts', {0: {}})

        self.cluster = Cluster(hosts)
        self.prefix = prefix
        self.vnodes = vnodes
        super(RedisTSDB, self).__init__(**kwargs)

    def validate(self):
        try:
            with self.cluster.all() as client:
                client.ping()
        except Exception as e:
            raise InvalidConfiguration(unicode(e))

    def make_key(self, model, epoch, model_key):
        if isinstance(model_key, six.integer_types):
            vnode = model_key % self.vnodes
        else:
            vnode = crc32(model_key) % self.vnodes

        return '{0}{1}:{2}:{3}'.format(self.prefix, model.value, epoch, vnode)

    def get_model_key(self, key):
        # We specialize integers so that a pure int-map can be optimized by
        # Redis, whereas long strings (say tag values) will store in a more
        # efficient hashed format.
        if not isinstance(key, six.integer_types):
            # enforce utf-8 encoding
            if isinstance(key, unicode):
                key = key.encode('utf-8')
            return md5(repr(key)).hexdigest()
        return key

    def incr(self, model, key, timestamp=None, count=1):
        self.incr_multi([(model, key)], timestamp, count)

    def incr_multi(self, items, timestamp=None, count=1):
        """
        Increment project ID=1 and group ID=5:

        >>> incr_multi([(TimeSeriesModel.project, 1), (TimeSeriesModel.group, 5)])
        """
        make_key = self.make_key
        normalize_to_rollup = self.normalize_to_rollup
        if timestamp is None:
            timestamp = timezone.now()

        with self.cluster.map() as client:
            for rollup, max_values in self.rollups:
                norm_rollup = normalize_to_rollup(timestamp, rollup)
                expire = rollup * max_values

                for model, key in items:
                    model_key = self.get_model_key(key)
                    hash_key = make_key(model, norm_rollup, model_key)
                    client.hincrby(hash_key, model_key, count)
                    client.expire(hash_key, expire)

    def get_range(self, model, keys, start, end, rollup=None):
        """
        To get a range of data for group ID=[1, 2, 3]:

        Start and end are both inclusive.

        >>> now = timezone.now()
        >>> get_range(TimeSeriesModel.group, [1, 2, 3],
        >>>          start=now - timedelta(days=1),
        >>>          end=now)
        """
        normalize_to_epoch = self.normalize_to_epoch
        normalize_to_rollup = self.normalize_to_rollup
        make_key = self.make_key

        if rollup is None:
            rollup = self.get_optimal_rollup(start, end)

        results = []
        timestamp = end
        with self.cluster.map() as client:
            while timestamp >= start:
                real_epoch = normalize_to_epoch(timestamp, rollup)
                norm_epoch = normalize_to_rollup(timestamp, rollup)

                for key in keys:
                    model_key = self.get_model_key(key)
                    hash_key = make_key(model, norm_epoch, model_key)
                    results.append((real_epoch, key,
                                    client.hget(hash_key, model_key)))

                timestamp = timestamp - timedelta(seconds=rollup)

        results_by_key = defaultdict(dict)
        for epoch, key, count in results:
            results_by_key[key][epoch] = int(count.value or 0)

        for key, points in results_by_key.iteritems():
            results_by_key[key] = sorted(points.items())
        return dict(results_by_key)
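This variant differs from Example #3 in how retention is enforced: instead of EXPIREAT with a computed expiry timestamp, each bucket receives a relative TTL of `rollup * max_values` seconds, i.e. just long enough to hold `max_values` rollup periods. A quick sketch of the resulting TTLs for a hypothetical rollups table:

# Sketch of the retention rule used above; the rollups table is hypothetical.
rollups = [(10, 360), (3600, 24 * 30)]   # (seconds per bucket, buckets kept)
for rollup, max_values in rollups:
    ttl = rollup * max_values
    print('%ds rollup -> TTL %ds' % (rollup, ttl))
# 10s rollup -> TTL 3600s (one hour of 10-second buckets)
# 3600s rollup -> TTL 2592000s (30 days of hourly buckets)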
Example #6
class RedisTSDB(BaseTSDB):
    """
    A time series storage backend for Redis.

    The time series API supports two data types:

        * simple counters
        * distinct counters (number of unique elements seen)

    The backend also supports virtual nodes (``vnodes``) which controls shard
    distribution. This value should be set to the anticipated maximum number of
    physical hosts and not modified after data has been written.

    Simple counters are stored in hashes. The key of the hash is composed of
    the model, epoch (which defines the start of the rollup period), and a
    shard identifier. This allows TTLs to be applied to the entire bucket,
    instead of having to be stored for every individual element in the rollup
    period. This results in a data layout that looks something like this::

        {
            "<model>:<epoch>:<shard id>": {
                "<key>": value,
                ...
            },
            ...
        }

    Distinct counters are stored using HyperLogLog, which provides a
    cardinality estimate with a standard error of 0.8%. The data layout looks
    something like this::

        {
            "<model>:<epoch>:<key>": value,
            ...
        }

    """
    def __init__(self, hosts=None, prefix='ts:', vnodes=64, **kwargs):
        # inherit default options from REDIS_OPTIONS
        defaults = settings.SENTRY_REDIS_OPTIONS

        if hosts is None:
            hosts = defaults.get('hosts', {0: {}})

        self.cluster = Cluster(hosts)
        self.prefix = prefix
        self.vnodes = vnodes
        super(RedisTSDB, self).__init__(**kwargs)

    def validate(self):
        logger.info('Validating Redis version...')

        try:
            with self.cluster.all() as client:
                results = client.info()
        except Exception as e:
            # Any connection issues should be caught here.
            raise InvalidConfiguration(unicode(e))

        versions = {}
        for id, info in results.value.items():
            host = self.cluster.hosts[id]
            # NOTE: This assumes there is no routing magic going on here, and
            # all requests to this host are being served by the same database.
            key = '{host}:{port}'.format(host=host.host, port=host.port)
            versions[key] = Version(
                map(int, info['redis_version'].split('.', 3)))

        check_versions('Redis (TSDB)', versions, Version((2, 8, 9)),
                       Version((3, 0, 4)))

    def make_key(self, model, epoch, model_key):
        if isinstance(model_key, six.integer_types):
            vnode = model_key % self.vnodes
        else:
            vnode = crc32(model_key) % self.vnodes

        return '{0}{1}:{2}:{3}'.format(self.prefix, model.value, epoch, vnode)

    def get_model_key(self, key):
        # We specialize integers so that a pure int-map can be optimized by
        # Redis, whereas long strings (say tag values) will store in a more
        # efficient hashed format.
        if not isinstance(key, six.integer_types):
            # enforce utf-8 encoding
            if isinstance(key, unicode):
                key = key.encode('utf-8')
            return md5(repr(key)).hexdigest()
        return key

    def incr(self, model, key, timestamp=None, count=1):
        self.incr_multi([(model, key)], timestamp, count)

    def incr_multi(self, items, timestamp=None, count=1):
        """
        Increment project ID=1 and group ID=5:

        >>> incr_multi([(TimeSeriesModel.project, 1), (TimeSeriesModel.group, 5)])
        """
        make_key = self.make_key
        normalize_to_rollup = self.normalize_to_rollup
        if timestamp is None:
            timestamp = timezone.now()

        with self.cluster.map() as client:
            for rollup, max_values in self.rollups:
                norm_rollup = normalize_to_rollup(timestamp, rollup)
                for model, key in items:
                    model_key = self.get_model_key(key)
                    hash_key = make_key(model, norm_rollup, model_key)
                    client.hincrby(hash_key, model_key, count)
                    client.expireat(
                        hash_key,
                        self.calculate_expiry(rollup, max_values, timestamp),
                    )

    def get_range(self, model, keys, start, end, rollup=None):
        """
        To get a range of data for group ID=[1, 2, 3]:

        Start and end are both inclusive.

        >>> now = timezone.now()
        >>> get_range(TimeSeriesModel.group, [1, 2, 3],
        >>>          start=now - timedelta(days=1),
        >>>          end=now)
        """
        normalize_to_epoch = self.normalize_to_epoch
        normalize_to_rollup = self.normalize_to_rollup
        make_key = self.make_key

        if rollup is None:
            rollup = self.get_optimal_rollup(start, end)

        results = []
        timestamp = end
        with self.cluster.map() as client:
            while timestamp >= start:
                real_epoch = normalize_to_epoch(timestamp, rollup)
                norm_epoch = normalize_to_rollup(timestamp, rollup)

                for key in keys:
                    model_key = self.get_model_key(key)
                    hash_key = make_key(model, norm_epoch, model_key)
                    results.append(
                        (real_epoch, key, client.hget(hash_key, model_key)))

                timestamp = timestamp - timedelta(seconds=rollup)

        results_by_key = defaultdict(dict)
        for epoch, key, count in results:
            results_by_key[key][epoch] = int(count.value or 0)

        for key, points in results_by_key.iteritems():
            results_by_key[key] = sorted(points.items())
        return dict(results_by_key)

    def make_distinct_counter_key(self, model, rollup, timestamp, key):
        return '{prefix}{model}:{epoch}:{key}'.format(
            prefix=self.prefix,
            model=model.value,
            epoch=self.normalize_ts_to_rollup(timestamp, rollup),
            key=self.get_model_key(key),
        )

    def record(self, model, key, values, timestamp=None):
        self.record_multi(((model, key, values), ), timestamp)

    def record_multi(self, items, timestamp=None):
        """
        Record an occurrence of an item in a distinct counter.
        """
        if timestamp is None:
            timestamp = timezone.now()

        ts = int(to_timestamp(
            timestamp))  # ``timestamp`` is not actually a timestamp :(

        with self.cluster.fanout() as client:
            for model, key, values in items:
                c = client.target_key(key)
                for rollup, max_values in self.rollups:
                    k = self.make_distinct_counter_key(
                        model,
                        rollup,
                        ts,
                        key,
                    )
                    c.pfadd(k, *values)
                    c.expireat(
                        k,
                        self.calculate_expiry(
                            rollup,
                            max_values,
                            timestamp,
                        ),
                    )

    def get_distinct_counts_series(self,
                                   model,
                                   keys,
                                   start,
                                   end=None,
                                   rollup=None):
        """
        Fetch counts of distinct items for each rollup interval within the range.
        """
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)

        responses = {}
        with self.cluster.fanout() as client:
            for key in keys:
                c = client.target_key(key)
                r = responses[key] = []
                for timestamp in series:
                    r.append((
                        timestamp,
                        c.pfcount(
                            self.make_distinct_counter_key(
                                model,
                                rollup,
                                timestamp,
                                key,
                            ), ),
                    ))

        return {
            key: [(timestamp, promise.value) for timestamp, promise in value]
            for key, value in responses.iteritems()
        }

    def get_distinct_counts_totals(self,
                                   model,
                                   keys,
                                   start,
                                   end=None,
                                   rollup=None):
        """
        Count distinct items during a time range.
        """
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)

        responses = {}
        with self.cluster.fanout() as client:
            for key in keys:
                # XXX: The current versions of the Redis driver don't implement
                # ``PFCOUNT`` correctly (although this is fixed in Git master,
                # so it should be available in the next release) and only
                # support a single key argument -- not the variadic signature
                # supported by the protocol -- so we have to call the command
                # directly here instead.
                ks = []
                for timestamp in series:
                    ks.append(
                        self.make_distinct_counter_key(model, rollup,
                                                       timestamp, key))

                responses[key] = client.target_key(key).execute_command(
                    'PFCOUNT', *ks)

        return {key: value.value for key, value in responses.iteritems()}
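Distinct counters are backed by HyperLogLog: `record_multi` PFADDs each value into one HLL per (model, rollup epoch, key), and the totals methods read back PFCOUNT over the interval keys, giving an estimated cardinality rather than an exact count. The sketch below is illustrative only: it assumes `TimeSeriesModel` (the enum name used in the docstrings above) is importable in this context, and the key and user IDs are made up.

# Hypothetical usage sketch -- TimeSeriesModel.group is the enum member used
# in the docstrings above; the key (42) and user IDs are illustrative.
from datetime import timedelta
from django.utils import timezone

tsdb = RedisTSDB(hosts={0: {'host': 'localhost', 'port': 6379}})
now = timezone.now()

# record() PFADDs the values into one HyperLogLog per rollup interval.
tsdb.record(TimeSeriesModel.group, key=42, values=[1001, 1002, 1001],
            timestamp=now)

# PFCOUNT over the interval keys then estimates how many unique values were
# seen; the repeated 1001 above is only counted once.
totals = tsdb.get_distinct_counts_totals(
    TimeSeriesModel.group, keys=[42], start=now - timedelta(hours=1), end=now)
print(totals)  # {42: 2} (approximately -- HyperLogLog is an estimate)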
Example #7
class RedisTSDB(BaseTSDB):
    """
    A time series storage backend for Redis.

    The time series API supports two data types:

        * simple counters
        * distinct counters (number of unique elements seen)

    The backend also supports virtual nodes (``vnodes``) which controls shard
    distribution. This value should be set to the anticipated maximum number of
    physical hosts and not modified after data has been written.

    Simple counters are stored in hashes. The key of the hash is composed of
    the model, epoch (which defines the start of the rollup period), and a
    shard identifier. This allows TTLs to be applied to the entire bucket,
    instead of having to be stored for every individual element in the rollup
    period. This results in a data layout that looks something like this::

        {
            "<model>:<epoch>:<shard id>": {
                "<key>": value,
                ...
            },
            ...
        }

    Distinct counters are stored using HyperLogLog, which provides a
    cardinality estimate with a standard error of 0.8%. The data layout looks
    something like this::

        {
            "<model>:<epoch>:<key>": value,
            ...
        }

    """
    def __init__(self, hosts=None, prefix='ts:', vnodes=64, **kwargs):
        # inherit default options from REDIS_OPTIONS
        defaults = settings.SENTRY_REDIS_OPTIONS

        if hosts is None:
            hosts = defaults.get('hosts', {0: {}})

        self.cluster = Cluster(hosts)
        self.prefix = prefix
        self.vnodes = vnodes
        super(RedisTSDB, self).__init__(**kwargs)

    def validate(self):
        logger.info('Validating Redis version...')

        try:
            with self.cluster.all() as client:
                results = client.info()
        except Exception as e:
            # Any connection issues should be caught here.
            raise InvalidConfiguration(unicode(e))

        versions = {}
        for id, info in results.value.items():
            host = self.cluster.hosts[id]
            # NOTE: This assumes there is no routing magic going on here, and
            # all requests to this host are being served by the same database.
            key = '{host}:{port}'.format(host=host.host, port=host.port)
            versions[key] = Version(map(int, info['redis_version'].split('.', 3)))

        check_versions('Redis (TSDB)', versions, Version((2, 8, 9)), Version((3, 0, 4)))

    def make_key(self, model, epoch, model_key):
        if isinstance(model_key, six.integer_types):
            vnode = model_key % self.vnodes
        else:
            vnode = crc32(model_key) % self.vnodes

        return '{0}{1}:{2}:{3}'.format(self.prefix, model.value, epoch, vnode)

    def get_model_key(self, key):
        # We specialize integers so that a pure int-map can be optimized by
        # Redis, whereas long strings (say tag values) will store in a more
        # efficient hashed format.
        if not isinstance(key, six.integer_types):
            # enforce utf-8 encoding
            if isinstance(key, unicode):
                key = key.encode('utf-8')
            return md5(repr(key)).hexdigest()
        return key

    def incr(self, model, key, timestamp=None, count=1):
        self.incr_multi([(model, key)], timestamp, count)

    def incr_multi(self, items, timestamp=None, count=1):
        """
        Increment project ID=1 and group ID=5:

        >>> incr_multi([(TimeSeriesModel.project, 1), (TimeSeriesModel.group, 5)])
        """
        make_key = self.make_key
        normalize_to_rollup = self.normalize_to_rollup
        if timestamp is None:
            timestamp = timezone.now()

        with self.cluster.map() as client:
            for rollup, max_values in self.rollups:
                norm_rollup = normalize_to_rollup(timestamp, rollup)
                for model, key in items:
                    model_key = self.get_model_key(key)
                    hash_key = make_key(model, norm_rollup, model_key)
                    client.hincrby(hash_key, model_key, count)
                    client.expireat(
                        hash_key,
                        self.calculate_expiry(rollup, max_values, timestamp),
                    )

    def get_range(self, model, keys, start, end, rollup=None):
        """
        To get a range of data for group ID=[1, 2, 3]:

        Start and end are both inclusive.

        >>> now = timezone.now()
        >>> get_range(TimeSeriesModel.group, [1, 2, 3],
        >>>          start=now - timedelta(days=1),
        >>>          end=now)
        """
        normalize_to_epoch = self.normalize_to_epoch
        normalize_to_rollup = self.normalize_to_rollup
        make_key = self.make_key

        if rollup is None:
            rollup = self.get_optimal_rollup(start, end)

        results = []
        timestamp = end
        with self.cluster.map() as client:
            while timestamp >= start:
                real_epoch = normalize_to_epoch(timestamp, rollup)
                norm_epoch = normalize_to_rollup(timestamp, rollup)

                for key in keys:
                    model_key = self.get_model_key(key)
                    hash_key = make_key(model, norm_epoch, model_key)
                    results.append((real_epoch, key,
                                    client.hget(hash_key, model_key)))

                timestamp = timestamp - timedelta(seconds=rollup)

        results_by_key = defaultdict(dict)
        for epoch, key, count in results:
            results_by_key[key][epoch] = int(count.value or 0)

        for key, points in results_by_key.iteritems():
            results_by_key[key] = sorted(points.items())
        return dict(results_by_key)

    def make_distinct_counter_key(self, model, rollup, timestamp, key):
        return '{prefix}{model}:{epoch}:{key}'.format(
            prefix=self.prefix,
            model=model.value,
            epoch=self.normalize_ts_to_rollup(timestamp, rollup),
            key=self.get_model_key(key),
        )

    def record(self, model, key, values, timestamp=None):
        self.record_multi(((model, key, values),), timestamp)

    def record_multi(self, items, timestamp=None):
        """
        Record an occurrence of an item in a distinct counter.
        """
        if timestamp is None:
            timestamp = timezone.now()

        ts = int(to_timestamp(timestamp))  # ``timestamp`` is not actually a timestamp :(

        with self.cluster.fanout() as client:
            for model, key, values in items:
                c = client.target_key(key)
                for rollup, max_values in self.rollups:
                    k = self.make_distinct_counter_key(
                        model,
                        rollup,
                        ts,
                        key,
                    )
                    c.pfadd(k, *values)
                    c.expireat(
                        k,
                        self.calculate_expiry(
                            rollup,
                            max_values,
                            timestamp,
                        ),
                    )

    def get_distinct_counts_series(self, model, keys, start, end=None, rollup=None):
        """
        Fetch counts of distinct items for each rollup interval within the range.
        """
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)

        responses = {}
        with self.cluster.fanout() as client:
            for key in keys:
                c = client.target_key(key)
                r = responses[key] = []
                for timestamp in series:
                    r.append((
                        timestamp,
                        c.pfcount(
                            self.make_distinct_counter_key(
                                model,
                                rollup,
                                timestamp,
                                key,
                            ),
                        ),
                    ))

        return {key: [(timestamp, promise.value) for timestamp, promise in value] for key, value in responses.iteritems()}

    def get_distinct_counts_totals(self, model, keys, start, end=None, rollup=None):
        """
        Count distinct items during a time range.
        """
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)

        responses = {}
        with self.cluster.fanout() as client:
            for key in keys:
                # XXX: The current versions of the Redis driver don't implement
                # ``PFCOUNT`` correctly (although this is fixed in Git master,
                # so it should be available in the next release) and only
                # support a single key argument -- not the variadic signature
                # supported by the protocol -- so we have to call the command
                # directly here instead.
                ks = []
                for timestamp in series:
                    ks.append(self.make_distinct_counter_key(model, rollup, timestamp, key))

                responses[key] = client.target_key(key).execute_command('PFCOUNT', *ks)

        return {key: value.value for key, value in responses.iteritems()}
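Across all of these variants, `get_range` walks backwards from `end` to `start` one rollup at a time, issues the HGETs inside a single pipelined `map()` block, and then folds the promises into a dict mapping each key to a sorted list of (epoch, count) points. An illustrative result shape for two keys and a 60-second rollup; the epochs and counts are made up.

# Illustrative shape of a get_range() result; the epochs and counts are
# made up, but zero-count intervals are included just like the real output.
expected_shape = {
    1: [(1401392400, 12), (1401392460, 0), (1401392520, 7)],
    3: [(1401392400, 0), (1401392460, 3), (1401392520, 3)],
}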