Example #1
class RedisCacheAdapter(object):
    instance = None

    def __new__(cls, *args, **kwargs):
        if cls.instance is None:
            cls.instance = super(RedisCacheAdapter, cls).__new__(cls)
            logger.info('Created redis cache connection')
        return cls.instance

    def __init__(self, **opts):
        self.cache = Redis(**opts)
        logger.info(
            'Created redis cache connection with options: {}'.format(opts))

    def set(self, key, value, expire=None, **opts):
        """Set a value for a key in the cache

        Args:
            key: The key to use for this data
            value: The value to set this key to
            expire (int|datetime.timedelta, optional): The expiration for this value. If `int` is passed, it indicates
                milliseconds
            **opts: Additional options passed through to the underlying `Redis.set` call

        Returns:
            (bool): Was this key set?
        """
        return self.cache.set(key, value, px=expire, **opts)

    def get(self, key, **opts):
        """Gets the value stored in the key

        Args:
            key: The key to get the value from
            **opts: Additional options to use.

        Returns:
            The value stored in the key
        """
        return self._decode_response(self.cache.get(key))

    def add(self, key, value, expire=None, **opts):
        """Add a key and a value to the cache if the key is not already in the cache

        Args:
            key: The key to store the value to
            value: The value to store in the key
            expire (int|datetime.timedelta, optional): The expiration for this value. If `int` is passed, it indicates
                milliseconds
            **opts: Additional options passed through to the underlying `Redis.set` call

        Returns:
            (bool): Was the key set?
        """
        return self.cache.set(key, value, px=expire, nx=True, **opts)

    def delete(self, key):
        """Deletes a key
        """
        return self.cache.delete(key)

    def incr(self, key, amount=1):
        """Increments a key by an amount.

        If the key is not found, then its value becomes the increment amount specified

        Args:
            key: The key to increment
            amount (int, optional): The amount to increment the key by. Defaults to 1

        Returns:
            (int): The incremented value
        """
        return int(self.cache.incr(key, amount))

    def decr(self, key, amount=1):
        """Decrements a key by an amount.

        If the key is not found, then its value becomes the decrement amount specified

        Args:
            key: The key to decrement
            amount (int, optional): The amount to decrement the key by. Defaults to 1

        Returns:
            (int): The decremented value
        """
        return int(self.cache.decr(key, amount))

    def rpush(self, key, *values):
        """Pushes a value to the right of a deque.

        This operation also creates a deque for a given key if one was not already created. Otherwise it uses the
        existing deque

        Args:
            key: The key of the deque to push the values to
            *values: The values to push to the deque
        """
        return self.cache.rpush(key, *values)

    def rpop(self, key):
        """Pops a value from the right of a deque.

        If this key is not a deque then this function will return None.

        Args:
            key: The key of the deque to pop the value from

        Returns:
            The rightmost value on the deque or None if the key is not a deque or the deque is empty
        """
        return self._decode_response(self.cache.rpop(key))

    def lpush(self, key, *values):
        """Pushes a value to the left of a deque.

        This operation also creates a deque for a given key if one was not already created. Otherwise it uses the
        existing deque

        Args:
            key: The key of the deque to push the values to
            *values: The values to push to the deque
        """
        return self.cache.lpush(key, *values)

    def lpop(self, key):
        """Pops a value from the left of a deque.

        If this key is not a deque then this function will return None.

        Args:
            key: The key of the deque to pop the value from

        Returns:
            The leftmost value on the deque or None if the key is not a deque or the deque is empty
        """
        return self._decode_response(self.cache.lpop(key))

    @staticmethod
    def _decode_response(response):
        if response is None:
            return response
        try:
            return response.decode('utf-8')
        except UnicodeDecodeError:
            return response

    def subscribe(self, channel):
        """Subscribe to a channel

        Args:
            channel (str): The name of the channel to subscribe to

        Returns:
            (RedisSubscription): The subscription for this channel
        """
        subscription = self.cache.pubsub()
        subscription.subscribe(channel)
        subscription.get_message()
        return RedisSubscription(channel, subscription)

    def unsubscribe(self, channel):
        """Unsubscribe to a channel

        Args:
            channel (str): The name of the channel to subscribe to

        Returns:
            (int): The number of subscribers unsubscribed ffrom this channel
        """
        return self.cache.publish(channel, unsubscribe_message)

    def publish(self, channel, data):
        """Publish some data to a channel

        Args:
            channel (str): The name of the channel to publish the data to
            data: The data to publish

        Returns:
            The number of subscriptions which received the data
        """
        return self.cache.publish(channel, data)

    def shutdown(self):
        """Shuts down the connection to the cache

        For the Redis cache, this is not necessary. Redis's ConnectionPool should handle it
        """
        pass

    def clear(self):
        """Clears all values in the cache
        """
        self.cache.flushdb()

    def check(self):
        """Checks the connection to the cache by requesting the Redis server info"""
        self.cache.info()

    def ping(self):
        """Pings the Redis cache to test the connection

        Returns:
            (bool): True if the ping was successful, False otherwise.
        """
        return self.cache.ping()

    def scan(self, pattern=None):
        """Scans through all keys in the cache

        Args:
            pattern (str, optional): Glob-style pattern to match keys against (as used by Redis SCAN)

        Returns:
            Iterator(str): The keys in the cache matching the pattern if specified. Else all the keys in the cache
        """
        return (key.decode('utf-8') for key in self.cache.scan_iter(pattern))

    def exists(self, key):
        """Checks to see if a key exists in the cache

        Args:
            key: The key to check

        Returns:
            bool: Does the key exist?
        """
        return bool(self.cache.exists(key))

    def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None):
        """Gets a distributed lock backed by the cache

        Args:
            name (str): The name of the lock
            timeout (float): The maximum life for the lock in seconds. If none is specified, it will remain locked
                until release() is called on the lock
            sleep (float): The amount of time to sleep per loop iteration when the lock is in blocking mode and another
                client is currently holding the lock
            blocking_timeout (float): The maximum amount of time in seconds to spend trying to acquire the lock. If
                none is specified, the lock will continue trying forever
        Returns:
            A lock
        """
        return self.cache.lock(name,
                               timeout=timeout,
                               sleep=sleep,
                               blocking_timeout=blocking_timeout)

    @classmethod
    def from_json(cls, json_in):
        """Constructs this cache from its JSON representation

        Args:
            json_in (dict): The JSON representation of this cache configuration

        Returns:
            (RedisCacheAdapter): A RedisCacheAdapter with a configuration reflecting the values in the JSON
        """
        password = os.getenv('WALKOFF_REDIS_PASSWORD')
        if password is not None:
            json_in['password'] = password
        if 'timeout' in json_in and json_in['timeout'] > 0:
            json_in['socket_timeout'] = json_in.pop('timeout')
        return cls(**json_in)
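
A short usage sketch for the adapter above (not part of the original module). It assumes a Redis server on localhost and that the module-level names the class relies on (Redis, logger, RedisSubscription, unsubscribe_message, os) are imported as in the original file; the keys are made up for illustration.

cache = RedisCacheAdapter(host='localhost', port=6379)
cache.set('greeting', 'hello', expire=5000)   # px semantics: expires after 5000 ms
print(cache.get('greeting'))                  # 'hello'
print(cache.incr('page-hits'))                # 1 (key created on first increment)
print(cache.decr('page-hits'))                # 0
with cache.lock('report-job', timeout=30):    # distributed lock backed by Redis
    pass                                      # critical section goes here
cache.shutdown()                              # no-op; the connection pool cleans up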
Example #2
        recordID = 'request:%s' % id
        arg = r.hget(recordID, "arg")
        debug = r.hget(recordID, "debug")
        if debug:
            print(name, id, arg, file=sys.stderr)
        count += 1
        try:
            result = name2function[name](arg)
            status = "done"
        except Exception as e:
            status = "error"
            result = "%s" % e
            count_err += 1
        r.hset(recordID, "result", result)
        r.hset(recordID, "status", status)
        r.rpush('result:%s' % id, status)
    for n in names:
        r.decr('worker-count:%s' % n)
    print("worker-%s terminated after processing %s requests (%s of which failed)" % (
        workerID, count, count_err), file=sys.stderr)


def shutdown(workerID):
    r.rpush('shutdown:%s' % workerID, '*')
    # should be restricted to existing workers


def multi_call(function, args, wait_for_worker=False):
    if not wait_for_worker:
        assert int(r.get('worker-count:%s' % function) or 0) > 0
    ids = []
    for arg in args:
        resultID = r.incr('last-result-id')
        r.hmset('request:%s' % resultID, {'function': function, 'arg': arg, 'status': 'todo'})
        r.rpush('call:%s' % function, resultID)
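
The fragment above shows the worker loop and the call side only; reading a result back is not shown. A minimal sketch of that missing piece under the key layout used above (the worker pushes a status onto result:<id> and stores the payload in the request:<id> hash); the helper name wait_for_result is an assumption, not part of the original code.

def wait_for_result(result_id, timeout=0):
    # Block until the worker pushes a status onto 'result:<id>'.
    r.blpop('result:%s' % result_id, timeout=timeout)
    # Then read back what the worker stored in the request hash.
    status = r.hget('request:%s' % result_id, 'status')
    result = r.hget('request:%s' % result_id, 'result')
    return status, result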
Example #3
class HandlerRedisDB0(object):
    def __init__(self, sender_qq, gid, nickname, enable_filter=False):
        self.redis = Redis('localhost', 6379)
        self.sender_qq = sender_qq
        self.gid = gid
        self.nickname = nickname
        self.filter_enabled = enable_filter

    def active(self):
        self.redis.sadd('daily_active_user', self.sender_qq)
        self.redis.sadd('daily_active_group', self.gid)
        self.redis.sadd('active_group_%s_qq' % self.gid, self.sender_qq)
        self.redis.set(self.sender_qq, self.nickname)

    def get_money_dict(self):
        money_dict = {}
        active_qqs = self.redis.smembers('active_group_%s_qq' % self.gid)
        for qq in active_qqs:
            nickname = self.redis.get(qq)
            money = self.redis.get('money-%s' % qq)
            if money is not None and nickname is not None:
                money_dict[nickname] = int(money)
        return money_dict

    def enable_group_replay(self):
        self.redis.set('group-%s-enable' % self.gid, 1)

    def disable_group_replay(self):
        self.redis.set('group-%s-enable' % self.gid, 0)

    def reply_group(self):
        r = self.redis.get('group-%s-enable' % self.gid)
        if r is not None and r == '0':
            return False
        return True

    def get_state(self):
        r = self.redis.get('state-' + self.sender_qq)
        if r is None:
            self.redis.set('state-' + self.sender_qq, 0)
            r = '0'
        return r

    def chg_state(self):
        key = 'state-' + self.sender_qq
        state = self.get_state()
        state = int(state)
        if not state:
            self.redis.set(key, 1, 3600)
        else:
            state = (state + 1) % 3
            self.redis.set(key, state, 3600)

    def get_joke_rank(self):
        rank_key = 'joke_rank-'+self.gid
        rank = self.redis.get(rank_key)
        if not rank:
            self.redis.set(rank_key, 40)
            rank = '40'
        joke_key = 'joke-' + rank
        joke = self.redis.get(joke_key)
        if not joke:
            joke = ''
        joke = joke.decode('utf-8')
        return joke, rank

    def chg_joke_rank(self):
        key = 'joke_rank-' + self.gid
        remain = self.redis.get(key)
        if not remain:
            self.redis.set(key, 40)
            remain = '40'
        remain = int(remain)
        if remain == 1:
            self.redis.set(key, 40)
        else:
            self.redis.decr(key)

    def read_over(self):
        self.redis.set('read-' + self.gid, 1, 86400)

    def has_read(self):
        done = self.redis.get('read-' + self.gid)
        if done == '1':
            return True
        return False

    def get_money(self):
        key = 'money-' + self.sender_qq
        balance = self.redis.get(key)
        if balance is None:
            self.redis.set(key, 40)
            return 40
        return int(balance)

    def add_money(self, money=1):
        key = 'money-' + self.sender_qq
        self.redis.incr(key, money)

    def reduce_money(self, reductions):
        balance = self.get_money()
        if balance < reductions:
            return False, balance
        self.redis.decr('money-' + self.sender_qq, reductions)
        return True, balance-reductions

    def add_question(self, question):
        self.redis.set('question-' + self.sender_qq, question, 7200)

    def get_question(self):
        question = self.redis.get('question-' + self.sender_qq)
        return question
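
A short usage sketch for the handler above, not taken from the original file: the QQ number, group id, and nickname are placeholders, and a Redis server on localhost:6379 is assumed.

handler = HandlerRedisDB0(sender_qq='12345', gid='67890', nickname='alice')
handler.active()                        # record the user and group as active
handler.add_money(5)                    # credits 5 to the 'money-12345' counter
ok, balance = handler.reduce_money(3)   # succeeds only if the balance covers it
print(ok, balance)                      # True 2
print(handler.get_money())              # 2, the remaining balance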
Example #4
def reduce_creepy(name):
    con = Redis('localhost')
    con.decr('coworker{0}'.format(name))
Example #5
from redis import Redis
redis_connection = Redis(decode_responses=True)
key = "some-key"
value = 15
redis_connection.set(key, value)
print(redis_connection.get(key))  # print the value
print(redis_connection.incr(key, 15))  # increment
print(redis_connection.decr(key, 20))  # decrement
Example #6
from django.test import TestCase

# Create your tests here.
from redis import Redis

red = Redis(host='192.168.217.129', port=7000)

# red.set("age", 18)
# print(red.get("age"))
# red.delete('age')
red.decr('age')
print(red.get("age"))
if red.get("age"):
    print(123)
else:
    print(21)
Example #7
def reduce_creepy(name):
    con = Redis('localhost') 
    con.decr('coworker{0}'.format(name))
Example #8
from redis import Redis


redis_connection = Redis(decode_responses=True)

key = "kluczv4"
value = 50

redis_connection.set(key, value)

print(redis_connection.get(key))

print(redis_connection.incr(key, 10))

print(redis_connection.decr(key, 40))
Example #9
from redis import Redis

redis_connection = Redis(db=1, decode_responses=True)

key = "some-key-B2"
value = 100

key2 = "some-key3"
value2 = 123

redis_connection.set(key, value)
redis_connection.set(key2, value2)

print(redis_connection.get(key))

print(redis_connection.incr(key, 50))

print(redis_connection.decr(key, 23))
Example #10
class RedisCacheAdapter(object):
    instance = None

    def __new__(cls, *args, **kwargs):
        if cls.instance is None:
            cls.instance = super(RedisCacheAdapter, cls).__new__(cls)
            logger.info('Created redis cache connection')
        return cls.instance

    def __init__(self, **opts):
        self.cache = Redis(**opts)
        logger.info('Created redis cache connection with options: {}'.format(opts))

    def set(self, key, value, expire=None, **opts):
        """Set a value for a key in the cache

        Args:
            key: The key to use for this data
            value: The value to set this key to
            expire (int|datetime.timedelta, optional): The expiration for this value. If `int` is passed, it indicates
                milliseconds
            **opts: Additional options passed through to the underlying `Redis.set` call

        Returns:
            (bool): Was this key set?
        """
        return self.cache.set(key, value, px=expire, **opts)

    def get(self, key, **opts):
        """Gets the value stored in the key

        Args:
            key: The key to get the value from
            **opts: Additional options to use.

        Returns:
            The value stored in the key
        """
        return self._decode_response(self.cache.get(key))

    def add(self, key, value, expire=None, **opts):
        """Add a key and a value to the cache if the key is not already in the cache

        Args:
            key: The key to store the value to
            value: The value to store in the key
            expire (int|datetime.timedelta, optional): The expiration for this value. If `int` is passed, it indicates
                milliseconds
            **opts: Additional options passed through to the underlying `Redis.set` call

        Returns:
            (bool): Was the key set?
        """
        return self.cache.set(key, value, px=expire, nx=True, **opts)

    def delete(self, key):
        """Deletes a key
        """
        return self.cache.delete(key)

    def incr(self, key, amount=1):
        """Increments a key by an amount.

        If the key is not found, then its value becomes the increment amount specified

        Args:
            key: The key to increment
            amount (int, optional): The amount to increment the key by. Defaults to 1

        Returns:
            (int): The incremented value
        """
        return int(self.cache.incr(key, amount))

    def decr(self, key, amount=1):
        """Decrements a key by an amount.

        If the key is not found, then its value becomes the decrement amount specified

        Args:
            key: The key to decrement
            amount (int, optional): The amount to decrement the key by. Defaults to 1

        Returns:
            (int): The decremented value
        """
        return int(self.cache.decr(key, amount))

    def rpush(self, key, *values):
        """Pushes a value to the right of a deque.

        This operation also creates a deque for a given key if one was not already created. Otherwise it uses the
        existing deque

        Args:
            key: The key of the deque to push the values to
            *values: The values to push to the deque
        """
        return self.cache.rpush(key, *values)

    def rpop(self, key):
        """Pops a value from the right of a deque.

        If this key is not a deque then this function will return None.

        Args:
            key: The key of the deque to pop the value from

        Returns:
            The rightmost value on the deque or None if the key is not a deque or the deque is empty
        """
        return self._decode_response(self.cache.rpop(key))

    def lpush(self, key, *values):
        """Pushes a value to the left of a deque.

        This operation also creates a deque for a given key if one was not already created. Otherwise it uses the
        existing deque

        Args:
            key: The key of the deque to push the values to
            *values: The values to push to the deque
        """
        return self.cache.lpush(key, *values)

    def lpop(self, key):
        """Pops a value from the left of a deque.

        If this key is not a deque then this function will return None.

        Args:
            key: The key of the deque to pop the value from

        Returns:
            The leftmost value on the deque or None if the key is not a deque or the deque is empty
        """
        return self._decode_response(self.cache.lpop(key))

    @staticmethod
    def _decode_response(response):
        if response is None:
            return response
        try:
            return response.decode('utf-8')
        except UnicodeDecodeError:
            return response

    def subscribe(self, channel):
        """Subscribe to a channel

        Args:
            channel (str): The name of the channel to subscribe to

        Returns:
            (RedisSubscription): The subscription for this channel
        """
        subscription = self.cache.pubsub()
        subscription.subscribe(channel)
        subscription.get_message()
        return RedisSubscription(channel, subscription)

    def unsubscribe(self, channel):
        """Unsubscribe to a channel

        Args:
            channel (str): The name of the channel to subscribe to

        Returns:
            (int): The number of subscribers unsubscribed ffrom this channel
        """
        return self.cache.publish(channel, unsubscribe_message)

    def publish(self, channel, data):
        """Publish some data to a channel

        Args:
            channel (str): The name of the channel to publish the data to
            data: The data to publish

        Returns:
            The number of subscriptions which received the data
        """
        return self.cache.publish(channel, data)

    def shutdown(self):
        """Shuts down the connection to the cache

        For the Redis cache, this is not necessary. Redis's ConnectionPool should handle it
        """
        pass

    def clear(self):
        """Clears all values in the cache
        """
        self.cache.flushdb()

    def check(self):
        """Checks the connection to the cache by requesting the Redis server info"""
        self.cache.info()

    def ping(self):
        """Pings the Redis cache to test the connection

        Returns:
            (bool): True if the ping was successful, False otherwise.
        """
        return self.cache.ping()

    def scan(self, pattern=None):
        """Scans through all keys in the cache

        Args:
            pattern (str, optional): Glob-style pattern to match keys against (as used by Redis SCAN)

        Returns:
            Iterator(str): The keys in the cache matching the pattern if specified. Else all the keys in the cache
        """
        return (key.decode('utf-8') for key in self.cache.scan_iter(pattern))

    def exists(self, key):
        """Checks to see if a key exists in the cache

        Args:
            key: The key to check

        Returns:
            bool: Does the key exist?
        """
        return bool(self.cache.exists(key))

    def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None):
        """Gets a distributed lock backed by the cache

        Args:
            name (str): The name of the lock
            timeout (float): The maximum life for the lock in seconds. If none is specified, it will remain locked
                until release() is called on the lock
            sleep (float): The amount of time to sleep per loop iteration when the lock is in blocking mode and another
                client is currently holding the lock
            blocking_timeout (float): The maximum amount of time in seconds to spend trying to acquire the lock. If
                none is specified, the lock will continue trying forever
        Returns:
            A lock
        """
        return self.cache.lock(name, timeout=timeout, sleep=sleep, blocking_timeout=blocking_timeout)

    @classmethod
    def from_json(cls, json_in):
        """Constructs this cache from its JSON representation

        Args:
            json_in (dict): The JSON representation of this cache configuration

        Returns:
            (RedisCacheAdapter): A RedisCacheAdapter with a configuration reflecting the values in the JSON
        """
        password = os.getenv('WALKOFF_REDIS_PASSWORD')
        if password is not None:
            json_in['password'] = password
        if 'timeout' in json_in and json_in['timeout'] > 0:
            json_in['socket_timeout'] = json_in.pop('timeout')
        return cls(**json_in)
Example #11
# Show the existing keys.
print(f'\nExisting keys: {con.keys()}')

# Inserting a key and value.
print('\nCreating a new key:')
con.set('chave1', 'valor1')
print(f'Existing keys: {con.keys()}')

# Accessing the value of a key.
print('Key value:', con.get('chave1'))

# Creating another key.
print('\nCreating another key:')
con.set('chave2', 1)
print(f'Existing keys: {con.keys()}')
print('Value of chave2:', con.get('chave2'))

# Incrementing the key's value.
print('incrementing chave2:', con.incr('chave2'))
print('incrementing chave2:', con.incr('chave2'))
print('decrementing chave2:', con.decr('chave2'))

# Removing a key.
print('\nRemoving chave2:')
con.delete('chave2')

# If the key does not exist, None is returned.
print('Trying to show the key that was removed')
print('Key value:', con.get('chave2'))
print(f'Existing keys: {con.keys()}')
Example #12
from datetime import datetime

from redis import Redis

redis_port = '9250'
password = ''
hostname = 'ec2-54-183-15-168.us-west-1.compute.amazonaws.com'

# mc = Client([hostname + ':' + memcached_port], debug=0)
mc = Redis(host=hostname, port=redis_port, password=password)

start_time = datetime.now()
mc.set("first_key", "first value")
value = mc.get("first_key")
print(value)
mc.set("second_key", 3)

mc.delete("second_key")

mc.set("key", "1")  # note that the key used for incr/decr must be
# a string.
value = mc.get('key')
print(value)
mc.incr("key")
value = mc.get('key')
print(value)
mc.decr("key")
value = mc.get('key')
print(value)

end_time = datetime.now()
elapsed_time = end_time - start_time

print('Total Time = ', elapsed_time.total_seconds() * 1000, ' ms')
Example #13
class RateLimit(object):
    """
    This class offers an abstraction of a Rate Limit algorithm implemented on
    top of Redis >= 2.6.0.
    """
    def __init__(self,
                 resource,
                 client,
                 max_requests,
                 expire=None,
                 blocking=True,
                 acquire_timeout=None,
                 r_connection=None,
                 first_acquire_waiting_limit=None):
        """
        Class initialization method checks if the Rate Limit algorithm is
        actually supported by the installed Redis version and sets some
        useful properties.

        If Rate Limit is not supported, it raises an Exception.

        :param resource: resource identifier string (e.g. 'user_pictures')
        :param client: client identifier string (e.g. '192.168.0.10')
        :param max_requests: integer (e.g. '10')
        :param expire: seconds to wait before resetting counters (e.g. '60')
        :param acquire_timeout: (if present) raise exception if unable to acquire quota this long, 0 - wait forever
        :param first_acquire_waiting_limit: (if present) max number of waiting instances on first acquire.
        """
        if r_connection:
            self._redis = r_connection
        else:
            self._redis = Redis(connection_pool=REDIS_POOL)

        if not self._is_rate_limit_supported():
            raise RedisVersionNotSupported()

        self._rate_limit_key = "rate_limit:{0}_{1}".format(resource, client)
        self._num_waiting_key = "waiting_rate_limit:{0}_{1}".format(
            resource, client)
        self._lock_num_waiting_key = '%s:lock' % self._num_waiting_key

        self._max_requests = max_requests
        self.first_acquire_waiting_limit = first_acquire_waiting_limit
        self._expire = expire or 1  # limit requests per this period of time
        if acquire_timeout is not None:
            self._acquire_timeout = acquire_timeout
        else:
            self._acquire_timeout = self._expire * 5
        self._acquire_check_interval = self._expire / 10.  # if quota is empty, retry after this period
        self.acquired_times = 0  # number of times rate limiter was used
        self.acquire_attempt = 0  # current attempt to acquire quota

        self.blocking = blocking

    def __enter__(self):
        try:
            try:
                if self.acquire_attempt > 0:
                    raise GaveUp('Do not nest the usage of %r instance!' %
                                 self)

                if not self._max_requests:  # effectively do not control rate limit
                    return

                self.check_waiting_instances_limit()

                acquire_attempt_start = datetime.datetime.now()
                self.acquire_attempt = 1
                while True:
                    if (0 < self._acquire_timeout <
                        (datetime.datetime.now() -
                         acquire_attempt_start).seconds and self.blocking):
                        raise QuotaTimeout(
                            'Unable to acquire quota in %.2f secs' %
                            self._acquire_timeout)

                    try:
                        self.increment_usage()
                    except TooManyRequests:
                        if not self.blocking:
                            raise
                        else:
                            if self.acquire_attempt == 1:
                                self._redis.incr(self._num_waiting_key
                                                 )  # +1 process waiting
                            self.acquire_attempt += 1
                            time.sleep(self._acquire_check_interval)
                    else:
                        break
            finally:
                if self.acquire_attempt > 1:
                    self._redis.decr(self._num_waiting_key)

        except Exception:
            self.acquire_attempt = 0
            raise
        else:
            self.acquired_times += 1

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.acquire_attempt = 0

    def check_waiting_instances_limit(self):
        with redis_lock.Lock(self._redis,
                             key=self._lock_num_waiting_key,
                             check_interval=0.1):
            if (self.first_acquire_waiting_limit is
                    not None  # limiting is necessary
                    and self.acquired_times == 0  # first acquire
                    and self.has_been_reached()
                    and self.number_of_waiting_for_quota() >=
                    self.first_acquire_waiting_limit):

                raise TooManyWaitingInstances()

    def get_usage(self):
        """
        Returns actual resource usage by client. Note that it could be greater
        than the maximum number of requests set.

        :return: integer: current usage
        """
        return int(self._redis.get(self._rate_limit_key) or 0)

    def has_been_reached(self):
        """
        Checks if Rate Limit has been reached.

        :return: bool: True if limit has been reached or False otherwise
        """
        return self.get_usage() >= self._max_requests

    def number_of_waiting_for_quota(self):
        """
        Checks how many RateLimiter instances are waiting for quota
        :return: int: quantity
        """
        return int(self._redis.get(self._num_waiting_key) or 0)

    def increment_usage(self):
        """
        Calls a LUA script that should increment the resource usage by client.

        If the resource usage exceeds the maximum number of requests, this
        method raises an exception.

        :return: integer: current usage
        """
        # perform the check first, so we do not even try to increment usage if no quota is left
        if self.has_been_reached():
            raise TooManyRequests()

        try:
            current_usage = self._redis.evalsha(INCREMENT_SCRIPT_HASH, 1,
                                                self._rate_limit_key,
                                                self._expire)
        except NoScriptError:
            current_usage = self._redis.eval(INCREMENT_SCRIPT, 1,
                                             self._rate_limit_key,
                                             self._expire)
        # Due to a race condition, several `increment_usage()` calls might
        # have passed the initial check at the same time. Example:
        #
        # quota = 10
        # C1. check quota -> 9
        # C2. check quota -> 9
        # C1. incr -> 10
        # C2. incr -> 11 (over quota!)
        #
        # So we check the actual usage after increment, too

        if int(current_usage) > self._max_requests:
            raise TooManyRequests()

        return current_usage

    def _is_rate_limit_supported(self):
        """
        Checks whether the Rate Limit algorithm is supported, which basically
        comes down to the Redis server version being 2.6.0 or greater.

        :return: bool
        """
        redis_version = self._redis.info()['redis_version']
        is_supported = StrictVersion(redis_version) >= StrictVersion('2.6.0')
        return bool(is_supported)

    def _reset(self):
        """
        Deletes all keys that start with ‘rate_limit:’.
        """
        for rate_limit_key in self._redis.keys('rate_limit:*'):
            self._redis.delete(rate_limit_key)
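
A hedged usage sketch for the rate limiter above. It is not from the original source and assumes the surrounding module provides REDIS_POOL, the Lua INCREMENT_SCRIPT / INCREMENT_SCRIPT_HASH pair, the redis_lock dependency, and the custom exceptions; upload_picture stands in for whatever operation is being throttled.

# Allow at most 10 requests per 60 seconds for this resource/client pair.
limiter = RateLimit(resource='user_pictures', client='192.168.0.10',
                    max_requests=10, expire=60, blocking=True)

# The context manager blocks until quota is available (or raises QuotaTimeout).
with limiter:
    upload_picture()

print(limiter.get_usage(), limiter.has_been_reached())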
Example #14
conn.sadd('key', 'item1', 'item2')
conn.srem('key', 'item2')
conn.sismember('key', 'item')  # membership test
conn.scard('key')
conn.smembers('key')
conn.smove('key1', 'key2', 'item')
conn.sdiff('key1', 'key2', 'key3')  # elements in the first set but in none of the others
conn.sinter('key1', 'key2')
conn.sunion('key1', 'key2')

# string
conn.set('key', '15')
conn.get('key')
conn.incr('key') # conn.incr('key', 1)
conn.incr('key', 5)
conn.decr('key', 5)
conn.incrbyfloat('key')
conn.incrbyfloat('key', -4.5)
conn.append('key', ' world')
conn.substr('key', 0, -2)
conn.setrange('key', 11, ' world!')

# zset
conn.zadd('zkey', {'member': 10, 'member1': 20})
conn.zrem('zkey', 'member1')
conn.zcard('zkey')
conn.zincrby('zkey', 10, 'member')
conn.zcount('zkey', 10, 20)
conn.zrank('zkey', 'member')  # rank with scores ordered from low to high
conn.zscore('zkey', 'member')
conn.zrange('zkey', 0, 9, withscores=True)  # first 10 members with their scores
Example #15
import redis


def semi_decr(key, redis_conn: redis.Redis = None):
    value = redis_conn.decr(key) <= 0
    if value:
        redis_conn.delete(key)
    return value
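
A small usage sketch for semi_decr, not part of the original snippet: the key acts as a reference counter that is deleted once it drops to zero, and the connection setup below is an assumption.

conn = redis.Redis(decode_responses=True)
conn.set('active-jobs', 2)

print(semi_decr('active-jobs', redis_conn=conn))  # False: counter is now 1
print(semi_decr('active-jobs', redis_conn=conn))  # True: reached 0, key deleted
print(conn.exists('active-jobs'))                 # 0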
Example #16
    print(rc.mget('s1', 's2'))
    print(DIVIDING)

    # Bulk-set only when none of the keys exist yet
    mapping = dict(uxs1='vs1', uxs2='vs2')
    print(rc.msetnx(mapping))
    print(rc.mget('uxs1', 'uxs2'))
    print(DIVIDING)

    # Increment the value of key 'num' (default step 1); a missing key is created and set to amount
    rc.set('num', 1)
    print(rc.incr('num', amount=2))
    print(rc.incrby('num', amount=3))

    # Decrement the value of key 'num' (default step 1); a missing key is created and set to -amount
    print(rc.decr('num', amount=3))
    print(rc.decrby('num', amount=3))
    print(DIVIDING)

    # Append a value to the string stored at 'append'
    rc.set('append', 'asd')
    print(rc.append('append', 'asdasdasd'))
    print(rc.get('append'))
    print(DIVIDING)

    # Return a substring of the string stored at 'append'
    print(rc.substr('append', 0, 5))

    # Get the substring of the key's value from start to end
    print(rc.getrange('append', 0, 5))